
deps: update V8 to 5.4.500.27

Pick up latest commit from the 5.4-lkgr branch.

deps: edit V8 gitignore to allow trace event copy
deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8
deps: edit V8 gitignore to allow gtest_prod.h copy
deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87

PR-URL: https://github.com/nodejs/node/pull/8317
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Branch: v6
Author: Michaël Zasso, 8 years ago
Commit: ec02b811a8
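
For reference, the version this update pins is recorded in deps/v8/include/v8-version.h (one of the changed files listed below) and is exposed at runtime as process.versions.v8. A minimal sketch, not part of the commit, showing how the version macros from that header can be printed:

    // Sketch only: prints the bundled V8 version from the v8-version.h macros.
    // After this update the expected output is 5.4.500.27.
    #include <cstdio>
    #include "v8-version.h"

    int main() {
      std::printf("%d.%d.%d.%d\n", V8_MAJOR_VERSION, V8_MINOR_VERSION,
                  V8_BUILD_NUMBER, V8_PATCH_LEVEL);
      return 0;
    }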
  1. deps/v8/.gitignore (27)
  2. deps/v8/.gn (51)
  3. deps/v8/AUTHORS (8)
  4. deps/v8/BUILD.gn (1015)
  5. deps/v8/ChangeLog (4959)
  6. deps/v8/DEPS (67)
  7. deps/v8/LICENSE.fdlibm (0)
  8. deps/v8/Makefile (98)
  9. deps/v8/Makefile.android (6)
  10. deps/v8/Makefile.nacl (97)
  11. deps/v8/OWNERS (4)
  12. deps/v8/PRESUBMIT.py (19)
  13. deps/v8/WATCHLISTS (8)
  14. deps/v8/base/trace_event/common/trace_event_common.h (34)
  15. deps/v8/build/has_valgrind.py (21)
  16. deps/v8/build_overrides/build.gni (18)
  17. deps/v8/build_overrides/gtest.gni (15)
  18. deps/v8/build_overrides/v8.gni (26)
  19. deps/v8/gni/isolate.gni (175)
  20. deps/v8/gni/v8.gni (108)
  21. deps/v8/gypfiles/OWNERS (0)
  22. deps/v8/gypfiles/README.txt (0)
  23. deps/v8/gypfiles/all.gyp (14)
  24. deps/v8/gypfiles/config/win/msvs_dependencies.isolate (0)
  25. deps/v8/gypfiles/coverage_wrapper.py (0)
  26. deps/v8/gypfiles/detect_v8_host_arch.py (0)
  27. deps/v8/gypfiles/download_gold_plugin.py (0)
  28. deps/v8/gypfiles/features.gypi (0)
  29. deps/v8/gypfiles/get_landmines.py (1)
  30. deps/v8/gypfiles/gyp_environment.py (6)
  31. deps/v8/gypfiles/gyp_v8 (8)
  32. deps/v8/gypfiles/gyp_v8.py (0)
  33. deps/v8/gypfiles/isolate.gypi (6)
  34. deps/v8/gypfiles/landmine_utils.py (0)
  35. deps/v8/gypfiles/landmines.py (2)
  36. deps/v8/gypfiles/mac/asan.gyp (0)
  37. deps/v8/gypfiles/set_clang_warning_flags.gypi (59)
  38. deps/v8/gypfiles/shim_headers.gypi (0)
  39. deps/v8/gypfiles/standalone.gypi (250)
  40. deps/v8/gypfiles/toolchain.gypi (99)
  41. deps/v8/gypfiles/vs_toolchain.py (235)
  42. deps/v8/include/libplatform/DEPS (5)
  43. deps/v8/include/libplatform/libplatform.h (9)
  44. deps/v8/include/libplatform/v8-tracing.h (253)
  45. deps/v8/include/v8-debug.h (27)
  46. deps/v8/include/v8-experimental.h (4)
  47. deps/v8/include/v8-platform.h (6)
  48. deps/v8/include/v8-profiler.h (99)
  49. deps/v8/include/v8-util.h (25)
  50. deps/v8/include/v8-version.h (6)
  51. deps/v8/include/v8.h (887)
  52. deps/v8/include/v8config.h (4)
  53. deps/v8/infra/config/cq.cfg (58)
  54. deps/v8/infra/mb/PRESUBMIT.py (35)
  55. deps/v8/infra/mb/mb_config.pyl (670)
  56. deps/v8/samples/hello-world.cc (18)
  57. deps/v8/samples/process.cc (17)
  58. deps/v8/samples/samples.gyp (14)
  59. deps/v8/samples/shell.cc (18)
  60. deps/v8/snapshot_toolchain.gni (81)
  61. deps/v8/src/DEPS (8)
  62. deps/v8/src/accessors.cc (420)
  63. deps/v8/src/accessors.h (23)
  64. deps/v8/src/address-map.cc (4)
  65. deps/v8/src/address-map.h (181)
  66. deps/v8/src/allocation-site-scopes.cc (3)
  67. deps/v8/src/allocation-site-scopes.h (2)
  68. deps/v8/src/allocation.cc (11)
  69. deps/v8/src/allocation.h (6)
  70. deps/v8/src/api-arguments-inl.h (116)
  71. deps/v8/src/api-arguments.cc (12)
  72. deps/v8/src/api-arguments.h (105)
  73. deps/v8/src/api-experimental.cc (10)
  74. deps/v8/src/api-natives.cc (559)
  75. deps/v8/src/api-natives.h (4)
  76. deps/v8/src/api.cc (2122)
  77. deps/v8/src/api.h (96)
  78. deps/v8/src/arguments.h (42)
  79. deps/v8/src/arm/assembler-arm-inl.h (37)
  80. deps/v8/src/arm/assembler-arm.cc (326)
  81. deps/v8/src/arm/assembler-arm.h (89)
  82. deps/v8/src/arm/code-stubs-arm.cc (795)
  83. deps/v8/src/arm/codegen-arm.cc (165)
  84. deps/v8/src/arm/codegen-arm.h (17)
  85. deps/v8/src/arm/deoptimizer-arm.cc (61)
  86. deps/v8/src/arm/disasm-arm.cc (106)
  87. deps/v8/src/arm/eh-frame-arm.cc (64)
  88. deps/v8/src/arm/interface-descriptors-arm.cc (110)
  89. deps/v8/src/arm/macro-assembler-arm.cc (433)
  90. deps/v8/src/arm/macro-assembler-arm.h (62)
  91. deps/v8/src/arm/simulator-arm.cc (300)
  92. deps/v8/src/arm/simulator-arm.h (16)
  93. deps/v8/src/arm64/assembler-arm64-inl.h (37)
  94. deps/v8/src/arm64/assembler-arm64.cc (161)
  95. deps/v8/src/arm64/assembler-arm64.h (65)
  96. deps/v8/src/arm64/code-stubs-arm64.cc (812)
  97. deps/v8/src/arm64/codegen-arm64.cc (206)
  98. deps/v8/src/arm64/codegen-arm64.h (17)
  99. deps/v8/src/arm64/constants-arm64.h (189)
  100. deps/v8/src/arm64/cpu-arm64.cc (18)

27
deps/v8/.gitignore

@ -40,15 +40,13 @@ gcsuspects
shell
shell_g
/_*
/build/Debug
/build/gyp
/build/ipch
/build/Release
/build/win_toolchain.json
/build
/gypfiles/win_toolchain.json
/buildtools
/hydrogen.cfg
/obj
/out
/out.gn
/perf.data
/perf.data.old
/test/benchmarks/data
@ -59,20 +57,35 @@ shell_g
/test/simdjs/data
/test/test262/data
/test/test262/data.tar
/test/test262/harness
/testing/gmock
/testing/gtest
/testing/gtest/*
!/testing/gtest/include
/testing/gtest/include/*
!/testing/gtest/include/gtest
/testing/gtest/include/gtest/*
!/testing/gtest/include/gtest/gtest_prod.h
/third_party
/third_party/android_tools
/third_party/cygwin
/third_party/icu
/third_party/instrumented_libraries
/third_party/inspector_protocol
/third_party/jinga2
/third_party/llvm
/third_party/llvm-build
/third_party/markupsafe
/third_party/WebKit
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
/tools/gyp
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go/linux64/isolate
/tools/luci-go/mac64/isolate
/tools/luci-go/win64/isolate.exe
/tools/mb
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client
@ -86,7 +99,9 @@ GTAGS
GRTAGS
GSYMS
GPATH
tags
gtags.files
turbo*.cfg
turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json

51
deps/v8/.gn

@ -0,0 +1,51 @@
# This file is used by the GN meta build system to find the root of the source
# tree and to set startup options. For documentation on the values set in this
# file, run "gn help dotfile" at the command line.
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
# The secondary source root is a parallel directory tree where
# GN build files are placed when they can not be placed directly
# in the source tree, e.g. for third party source trees.
secondary_source = "//build/secondary/"
# These are the targets to check headers for by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
check_targets = [
]
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged except for gypi_to_gn calls.
exec_script_whitelist = [
"//build/config/android/BUILD.gn",
"//build/config/android/config.gni",
"//build/config/android/internal_rules.gni",
"//build/config/android/rules.gni",
"//build/config/BUILD.gn",
"//build/config/compiler/BUILD.gn",
"//build/config/gcc/gcc_version.gni",
"//build/config/ios/ios_sdk.gni",
"//build/config/linux/atk/BUILD.gn",
"//build/config/linux/BUILD.gn",
"//build/config/linux/pkg_config.gni",
"//build/config/mac/mac_sdk.gni",
"//build/config/posix/BUILD.gn",
"//build/config/sysroot.gni",
"//build/config/win/BUILD.gn",
"//build/config/win/visual_studio_version.gni",
"//build/gn_helpers.py",
"//build/gypi_to_gn.py",
"//build/toolchain/concurrent_links.gni",
"//build/toolchain/gcc_toolchain.gni",
"//build/toolchain/mac/BUILD.gn",
"//build/toolchain/win/BUILD.gn",
"//build/util/branding.gni",
"//build/util/version.gni",
"//test/cctest/BUILD.gn",
"//test/test262/BUILD.gn",
"//test/unittests/BUILD.gn",
]

8
deps/v8/AUTHORS

@ -40,6 +40,7 @@ Alexis Campailla <alexis@janeasystems.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
Anna Henningsen <addaleax@gmail.com>
Bangfu Tao <bangfu.tao@samsung.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
@ -50,7 +51,9 @@ Craig Schlenter <craig.schlenter@gmail.com>
Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel James <dnljms@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
Erich Ocean <erich.ocean@me.com>
@ -62,6 +65,7 @@ Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
Han Choongwoo <cwhan.tunz@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jan de Mooij <jandemooij@gmail.com>
@ -85,11 +89,13 @@ Matthew Sporleder <msporleder@gmail.com>
Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Lutz <michi@icosahedron.de>
Michael Smith <mike@w3.org>
Michaël Zasso <mic.besace@gmail.com>
Mike Gilbert <floppymaster@gmail.com>
Mike Pennisi <mike@mikepennisi.com>
Milton Chiang <milton.chiang@mediatek.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
@ -113,4 +119,4 @@ Vladimir Shutoff <vovan@shutoff.ru>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
柳荣一 <admin@web-tinker.com>
柳荣一 <admin@web-tinker.com>

1015
deps/v8/BUILD.gn

File diff suppressed because it is too large

4959
deps/v8/ChangeLog

File diff suppressed because it is too large

67
deps/v8/DEPS

@ -7,16 +7,28 @@ vars = {
}
deps = {
"v8/build/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "4ec6c4e3a94bd04a6da2858163d40b2429b8aad1",
"v8/build":
Var("git_url") + "/chromium/src/build.git" + "@" + "59daf502c36f20b5c9292f4bd9af85791f8a5884",
"v8/tools/gyp":
Var("git_url") + "/external/gyp.git" + "@" + "702ac58e477214c635d9b541932e75a95d349352",
"v8/third_party/icu":
Var("git_url") + "/chromium/deps/icu.git" + "@" + "c291cde264469b20ca969ce8832088acb21e0c48",
Var("git_url") + "/chromium/deps/icu.git" + "@" + "2341038bf72869a5683a893a2b319a48ffec7f62",
"v8/third_party/instrumented_libraries":
Var("git_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "f15768d7fdf68c0748d20738184120c8ab2e6db7",
"v8/buildtools":
Var("git_url") + "/chromium/buildtools.git" + "@" + "80b5126f91be4eb359248d28696746ef09d5be67",
Var("git_url") + "/chromium/buildtools.git" + "@" + "adb8bf4e8fc92aa1717bf151b862d58e6f27c4f2",
"v8/base/trace_event/common":
Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "c8c8665c2deaf1cc749d9f8e153256d4f67bf1b8",
Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "315bf1e2d45be7d53346c31cfcc37424a32c30c8",
"v8/third_party/WebKit/Source/platform/inspector_protocol":
Var("git_url") + "/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "547960151fb364dd9a382fa79ffc9abfb184e3d1",
"v8/third_party/jinja2":
Var("git_url") + "/chromium/src/third_party/jinja2.git" + "@" + "2222b31554f03e62600cd7e383376a7c187967a1",
"v8/third_party/markupsafe":
Var("git_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
"v8/tools/mb":
Var('git_url') + '/chromium/src/tools/mb.git' + '@' + "99788b8b516c44d7db25cfb68695bc234fdee5ed",
"v8/tools/swarming_client":
Var('git_url') + '/external/swarming.client.git' + '@' + "df6e95e7669883c8fe9ef956c69a544154701a49",
Var('git_url') + '/external/swarming.client.git' + '@' + "e4288c3040a32f2e7ad92f957668f2ee3d36e5a6",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@ -25,17 +37,19 @@ deps = {
Var("git_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
"v8/test/mozilla/data":
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
"v8/test/test262/data":
Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "57d3e2216fa86ad63b6c0a54914ba9dcbff96003",
Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "88bc7fe7586f161201c5f14f55c9c489f82b1b67",
"v8/test/test262/harness":
Var("git_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
"v8/tools/clang":
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "faee82e064e04e5cbf60cc7327e7a81d2a4557ad",
Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "3afb04a8153e40ff00f9eaa14337851c3ab4a368",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("git_url") + "/android_tools.git" + "@" + "adfd31794011488cd0fc716b53558b2d8a67af8b",
Var("git_url") + "/android_tools.git" + "@" + "af1c5a4cd6329ccdcf8c2bc93d9eea02f9d74869",
},
"win": {
"v8/third_party/cygwin":
@ -43,6 +57,8 @@ deps_os = {
}
}
recursedeps = [ 'v8/third_party/android_tools' ]
include_rules = [
# Everybody can use some things.
"+include",
@ -53,6 +69,7 @@ include_rules = [
# checkdeps.py shouldn't check for includes in these directories:
skip_child_includes = [
"build",
"gypfiles",
"third_party",
]
@ -65,7 +82,7 @@ hooks = [
'pattern': '.',
'action': [
'python',
'v8/build/landmines.py',
'v8/gypfiles/landmines.py',
],
},
# Pull clang-format binaries using checked-in hashes.
@ -186,11 +203,33 @@ hooks = [
"-s", "v8/buildtools/linux64/gn.sha1",
],
},
{
# Downloads the current stable linux sysroot to build/linux/ if needed.
# This sysroot updates at about the same rate that the chrome build deps
# change.
'name': 'sysroot',
'pattern': '.',
'action': [
'python',
'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--running-as-hook',
],
},
{
# Pull sanitizer-instrumented third-party libraries if requested via
# GYP_DEFINES.
'name': 'instrumented_libraries',
'pattern': '\\.sha1',
'action': [
'python',
'v8/third_party/instrumented_libraries/scripts/download_binaries.py',
],
},
{
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
'pattern': '.',
'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
'action': ['python', 'v8/gypfiles/vs_toolchain.py', 'update'],
},
# Pull binutils for linux, enabled debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.
@ -208,7 +247,7 @@ hooks = [
# Note: This must run before the clang update.
'name': 'gold_plugin',
'pattern': '.',
'action': ['python', 'v8/build/download_gold_plugin.py'],
'action': ['python', 'v8/gypfiles/download_gold_plugin.py'],
},
{
# Pull clang if needed or requested via GYP_DEFINES.
@ -220,6 +259,6 @@ hooks = [
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",
"action": ["python", "v8/build/gyp_v8"],
"action": ["python", "v8/gypfiles/gyp_v8"],
},
]

0
deps/v8/src/third_party/fdlibm/LICENSE → deps/v8/LICENSE.fdlibm

98
deps/v8/Makefile

@ -33,7 +33,6 @@ GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_HOST_ARCH ?=
ANDROID_V8 ?= /data/local/tmp/v8
NACL_SDK_ROOT ?=
# Special build flags. Use them like this: "make library=shared"
@ -122,10 +121,6 @@ endif
ifeq ($(werror), no)
GYPFLAGS += -Dwerror=''
endif
# presubmit=no
ifeq ($(presubmit), no)
TESTFLAGS += --no-presubmit
endif
# strictaliasing=off (workaround for GCC-4.5)
ifeq ($(strictaliasing), off)
GYPFLAGS += -Dv8_no_strict_aliasing=1
@ -227,6 +222,11 @@ ifeq ($(no_omit_framepointer), on)
GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer
endif
ifdef android_ndk_root
GYPFLAGS += -Dandroid_ndk_root=$(android_ndk_root)
export ANDROID_NDK_ROOT = $(android_ndk_root)
endif
# ----------------- available targets: --------------------
# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
@ -235,7 +235,6 @@ endif
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - "android": cross-compile for Android/ARM
# - "nacl" : cross-compile for Native Client (ia32 and x64)
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
@ -245,21 +244,22 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 \
s390 s390x
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \
s390x
ARCHES32 = ia32 arm mips mipsel x87 ppc s390
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
build/shim_headers.gypi build/features.gypi build/standalone.gypi \
build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
gypfiles/shim_headers.gypi gypfiles/features.gypi \
gypfiles/standalone.gypi \
gypfiles/toolchain.gypi gypfiles/all.gyp gypfiles/mac/asan.gyp \
test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \
test/unittests/unittests.gyp tools/gyp/v8.gyp \
test/unittests/unittests.gyp src/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
buildtools/third_party/libc++abi/libc++abi.gyp \
buildtools/third_party/libc++/libc++.gyp samples/samples.gyp \
@ -273,13 +273,10 @@ endif
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
@ -288,9 +285,7 @@ ENVFILE = $(OUTDIR)/environment
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
$(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
must-set-NACL_SDK_ROOT
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS)
# Target definitions. "all" is the default.
all: $(DEFAULT_MODES)
@ -329,16 +324,6 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
$(NACL_ARCHES): $(addprefix $$@.,$(MODES))
$(NACL_BUILDS): $(GYPFILES) $(ENVFILE) \
Makefile.nacl must-set-NACL_SDK_ROOT
@$(MAKE) -f Makefile.nacl $@ \
ARCH="$(basename $@)" \
MODE="$(subst .,,$(suffix $@))" \
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
# Test targets.
check: all
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
@ -382,15 +367,6 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 --nopresubmit --noi18n \
--command-prefix="tools/nacl-run.py"
$(addsuffix .check, $(NACL_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
@ -420,7 +396,7 @@ turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
tc: turbocheck
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)*
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
@ -432,7 +408,7 @@ native.clean:
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean gtags.clean
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean gtags.clean tags.clean
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
@ -441,34 +417,28 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
cut -f 2 -d " " | cut -f 1 -d "-" ))
$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst s390x,s390,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst powerpc,ppc,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst ppc64,ppc,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst ppcle,ppc,$(CXX_TARGET_ARCH)))
$(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
-Igypfiles/standalone.gypi --depth=. \
-Dv8_target_arch=$(V8_TARGET_ARCH) \
$(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
-Dtarget_arch=$(V8_TARGET_ARCH),) \
-Dtarget_arch=$(V8_TARGET_ARCH), \
$(if $(shell echo $(ARCHES32) | grep $(V8_TARGET_ARCH)), \
-Dtarget_arch=ia32,)) \
$(if $(findstring optdebug,$@),-Dv8_optimized_debug=1,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
# Note that NACL_SDK_ROOT must be set to point to an appropriate
# Native Client SDK before using this makefile. You can download
# an SDK here:
# https://developers.google.com/native-client/sdk/download
# The path indicated by NACL_SDK_ROOT will typically end with
# a folder for a pepper version such as "pepper_25" that should
# have "tools" and "toolchain" subdirectories.
must-set-NACL_SDK_ROOT:
ifndef NACL_SDK_ROOT
$(error NACL_SDK_ROOT must be set)
endif
tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
-Igypfiles/standalone.gypi --depth=. -S.native $(GYPFLAGS)
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
@ -497,11 +467,21 @@ gtags.files: $(GYPFILES) $(ENVFILE)
# We need to manually set the stack limit here, to work around bugs in
# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null)
# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
# gtags.files after switching branches don't cause recipe failures.
GPATH GRTAGS GSYMS GTAGS: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
@bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
gtags.clean:
rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
tags: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
@(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
(echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
ctags --fields=+l -L $<
tags.clean:
rm -r tags
dependencies builddeps:
$(error Use 'gclient sync' instead)

6
deps/v8/Makefile.android

@ -66,7 +66,7 @@ ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH)" \
tools/gyp/gyp --generator-output="${OUTDIR}" gypfiles/all.gyp \
-Igypfiles/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

97
deps/v8/Makefile.nacl

@ -1,97 +0,0 @@
#
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
NACL_ARCHES = nacl_ia32 nacl_x64
MODES = release debug
# Generates all combinations of NACL ARCHES and MODES,
# e.g. "nacl_ia32.release" or "nacl_x64.release"
NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain)
NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/linux_pnacl
ifeq ($(wildcard $(NACL_TOOLCHAIN)),)
$(error Cannot find Native Client toolchain in "${NACL_TOOLCHAIN}")
endif
ifeq ($(ARCH), nacl_ia32)
GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32
NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-32"
else
ifeq ($(ARCH), nacl_x64)
GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32
NACL_CC = "$(NACL_TOOLCHAIN)/bin/pnacl-clang"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/pnacl-clang++ --pnacl-allow-native -arch x86-64"
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
# For mksnapshot host generation.
GYPENV += host_os=${HOST_OS}
# ICU doesn't support NaCl.
GYPENV += v8_enable_i18n_support=0
# Disable strict aliasing - v8 code often relies on undefined behavior of C++.
GYPENV += v8_no_strict_aliasing=1
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...
$(NACL_BUILDS): $(NACL_MAKEFILES)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
AR="$(NACL_TOOLCHAIN)/bin/pnacl-ar" \
RANLIB="$(NACL_TOOLCHAIN)/bin/pnacl-ranlib" \
LD="$(NACL_TOOLCHAIN)/bin/pnacl-ld" \
LINK=${NACL_LINK} \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# NACL GYP file generation targets.
$(NACL_MAKEFILES):
GYP_GENERATORS=make \
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
LINK=${NACL_LINK} \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \
-Dwno_array_bounds=-Wno-array-bounds

4
deps/v8/OWNERS

@ -1,6 +1,9 @@
adamk@chromium.org
ahaas@chromium.org
bbudge@chromium.org
binji@chromium.org
bmeurer@chromium.org
bradnelson@chromium.org
cbruni@chromium.org
danno@chromium.org
epertoso@chromium.org
@ -15,6 +18,7 @@ machenbach@chromium.org
marja@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org

19
deps/v8/PRESUBMIT.py

@ -223,6 +223,8 @@ def _CommonChecks(input_api, output_api):
input_api, output_api, source_file_filter=None))
results.extend(input_api.canned_checks.CheckPatchFormatted(
input_api, output_api))
results.extend(input_api.canned_checks.CheckGenderNeutral(
input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
@ -242,32 +244,15 @@ def _SkipTreeCheck(input_api, output_api):
return input_api.environ.get('PRESUBMIT_TREE_CHECK') == 'skip'
def _CheckChangeLogFlag(input_api, output_api, warn):
"""Checks usage of LOG= flag in the commit message."""
results = []
if (input_api.change.BUG and input_api.change.BUG != 'none' and
not 'LOG' in input_api.change.tags):
text = ('An issue reference (BUG=) requires a change log flag (LOG=). '
'Use LOG=Y for including this commit message in the change log. '
'Use LOG=N or leave blank otherwise.')
if warn:
results.append(output_api.PresubmitPromptWarning(text))
else:
results.append(output_api.PresubmitError(text))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api, True))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLogFlag(input_api, output_api, False))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
if not _SkipTreeCheck(input_api, output_api):

8
deps/v8/WATCHLISTS

@ -44,7 +44,6 @@
},
'interpreter': {
'filepath': 'src/interpreter/' \
'|src/compiler/interpreter' \
'|src/compiler/bytecode' \
'|test/cctest/interpreter/' \
'|test/unittests/interpreter/',
@ -60,6 +59,9 @@
},
'ia32': {
'filepath': '/ia32/',
},
'merges': {
'filepath': '.',
}
},
@ -91,5 +93,9 @@
'ia32': [
'v8-x87-ports@googlegroups.com',
],
'merges': [
# Only enabled on branches created with tools/release/create_release.py
'v8-merges@googlegroups.com',
],
},
}

34
deps/v8/base/trace_event/common/trace_event_common.h

@ -612,6 +612,13 @@
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id, \
timestamp, arg1_name, \
arg1_val, arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
timestamp) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
@ -701,6 +708,13 @@
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val)
#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id, \
timestamp, arg1_name, arg1_val, \
arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
arg1_name, arg1_val, arg2_name, arg2_val)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@ -928,12 +942,8 @@
// Special trace event macro to trace task execution with the location where it
// was posted from.
#define TRACE_TASK_EXECUTION(run_function, task) \
TRACE_EVENT2("toplevel", run_function, "src_file", \
(task).posted_from.file_name(), "src_func", \
(task).posted_from.function_name()); \
TRACE_EVENT_API_SCOPED_TASK_EXECUTION_EVENT INTERNAL_TRACE_EVENT_UID( \
task_event)((task).posted_from.file_name());
#define TRACE_TASK_EXECUTION(run_function, task) \
INTERNAL_TRACE_TASK_EXECUTION(run_function, task)
// TRACE_EVENT_METADATA* events are information related to other
// injected events, not events in their own right.
@ -991,6 +1001,17 @@
INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
TRACE_ID_DONT_MANGLE(context))
// Macro to specify that two trace IDs are identical. For example,
// TRACE_BIND_IDS(
// "category", "name",
// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
// the current process have the same ID as events with ID
// ("blink::ResourceFetcher::FetchRequest", 0x2000).
#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
do { \
@ -1056,6 +1077,7 @@
#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
#define TRACE_EVENT_PHASE_BIND_IDS ('=')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
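
The two new *_WITH_TIMESTAMP2 macros added above follow the existing WITH_TIMESTAMP1 pattern but carry two key/value arguments. A hedged usage sketch; the category, id, timestamp and argument names below are made up for illustration:

    // Begin and end an async slice with an explicit timestamp and two args.
    TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2("v8", "Parse", request_id, start_us,
                                            "size", source_size, "lazy", is_lazy);
    // ... work happens elsewhere, possibly on another thread ...
    TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2("v8", "Parse", request_id, end_us,
                                          "size", source_size, "lazy", is_lazy);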

21
deps/v8/build/has_valgrind.py

@ -1,21 +0,0 @@
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind')
LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86')
LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64')
def DoMain(_):
"""Hook to be called from gyp without starting a separate python
interpreter."""
return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR))
if __name__ == '__main__':
print DoMain([])

18
deps/v8/build_overrides/build.gni

@ -0,0 +1,18 @@
# Copyright 2016 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
mac_sdk_min_build_override = "10.10"
mac_deployment_target_build_override = "10.7"
# Variable that can be used to support multiple build scenarios, like having
# Chromium specific targets in a client project's GN file etc.
build_with_chromium = false
# Uncomment these to specify a different NDK location and version in
# non-Chromium builds.
# default_android_ndk_root = "//third_party/android_tools/ndk"
# default_android_ndk_version = "r10e"
# Some non-Chromium builds don't support building java targets.
enable_java_templates = false

15
deps/v8/build_overrides/gtest.gni

@ -0,0 +1,15 @@
# Copyright 2016 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Exclude support for registering main function in multi-process tests.
gtest_include_multiprocess = false
# Exclude support for platform-specific operations across unit tests.
gtest_include_platform_test = false
# Exclude support for testing Objective C code on OS X and iOS.
gtest_include_objc_support = false
# Exclude support for flushing coverage files on iOS.
gtest_include_ios_coverage = false

26
deps/v8/build_overrides/v8.gni

@ -0,0 +1,26 @@
# Copyright 2015 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/features.gni")
import("//build/config/ui.gni")
import("//build/config/v8_target_cpu.gni")
import("//gni/v8.gni")
if (is_android) {
import("//build/config/android/config.gni")
}
if (((v8_current_cpu == "x86" ||
v8_current_cpu == "x64" ||
v8_current_cpu=="x87") &&
(is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)) {
v8_enable_gdbjit_default = true
}
v8_imminent_deprecation_warnings_default = true
# Add simple extras solely for the purpose of the cctests.
v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
v8_experimental_extra_library_files = [ "//test/cctest/test-experimental-extra.js" ]

175
deps/v8/gni/isolate.gni

@ -0,0 +1,175 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/sanitizers/sanitizers.gni")
import("//third_party/icu/config.gni")
import("v8.gni")
declare_args() {
# Sets the test isolation mode (noop|prepare|check).
v8_test_isolation_mode = "noop"
}
template("v8_isolate_run") {
# Remember target name as within the action scope the target name will be
# different.
name = target_name
if (name != "" && invoker.isolate != "" && invoker.deps != [] &&
v8_test_isolation_mode != "noop") {
action(name + "_run") {
testonly = true
deps = invoker.deps
script = "//tools/isolate_driver.py"
sources = [
invoker.isolate,
]
inputs = [
# Files that are known to be involved in this step.
"//tools/swarming_client/isolate.py",
"//tools/swarming_client/run_isolated.py",
]
if (v8_test_isolation_mode == "prepare") {
outputs = [
"$root_out_dir/$name.isolated.gen.json",
]
} else if (v8_test_isolation_mode == "check") {
outputs = [
"$root_out_dir/$name.isolated",
"$root_out_dir/$name.isolated.state",
]
}
# Translate gn to gyp variables.
if (is_asan) {
asan = "1"
} else {
asan = "0"
}
if (is_msan) {
msan = "1"
} else {
msan = "0"
}
if (is_tsan) {
tsan = "1"
} else {
tsan = "0"
}
if (is_cfi) {
cfi_vptr = "1"
} else {
cfi_vptr = "0"
}
if (target_cpu == "x86") {
target_arch = "ia32"
} else {
target_arch = target_cpu
}
if (is_debug) {
configuration_name = "Debug"
} else {
configuration_name = "Release"
}
if (is_component_build) {
component = "shared_library"
} else {
component = "static_library"
}
if (icu_use_data_file) {
icu_use_data_file_flag = "1"
} else {
icu_use_data_file_flag = "0"
}
if (v8_use_external_startup_data) {
use_external_startup_data = "1"
} else {
use_external_startup_data = "0"
}
if (v8_use_snapshot) {
use_snapshot = "true"
} else {
use_snapshot = "false"
}
if (v8_has_valgrind) {
has_valgrind = "1"
} else {
has_valgrind = "0"
}
if (v8_gcmole) {
gcmole = "1"
} else {
gcmole = "0"
}
# Note, all paths will be rebased in isolate_driver.py to be relative to
# the isolate file.
args = [
v8_test_isolation_mode,
"--isolated",
rebase_path("$root_out_dir/$name.isolated", root_build_dir),
"--isolate",
rebase_path(invoker.isolate, root_build_dir),
# Path variables are used to replace file paths when loading a .isolate
# file
"--path-variable",
"DEPTH",
rebase_path("//", root_build_dir),
"--path-variable",
"PRODUCT_DIR",
rebase_path(root_out_dir, root_build_dir),
# TODO(machenbach): Set variables for remaining features.
"--config-variable",
"CONFIGURATION_NAME=$configuration_name",
"--config-variable",
"OS=$target_os",
"--config-variable",
"asan=$asan",
"--config-variable",
"cfi_vptr=$cfi_vptr",
"--config-variable",
"gcmole=$gcmole",
"--config-variable",
"has_valgrind=$has_valgrind",
"--config-variable",
"icu_use_data_file_flag=$icu_use_data_file_flag",
"--config-variable",
"msan=$msan",
"--config-variable",
"tsan=$tsan",
"--config-variable",
"coverage=0",
"--config-variable",
"sanitizer_coverage=0",
"--config-variable",
"component=$component",
"--config-variable",
"target_arch=$target_arch",
"--config-variable",
"v8_use_external_startup_data=$use_external_startup_data",
"--config-variable",
"v8_use_snapshot=$use_snapshot",
]
if (is_win) {
args += [
"--config-variable",
"msvs_version=2013",
]
} else {
args += [
"--config-variable",
"msvs_version=0",
]
}
}
}
}
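
A minimal sketch of how this template might be invoked from a BUILD.gn file; the target and isolate names are hypothetical:

    # Only produces an action when v8_test_isolation_mode != "noop".
    v8_isolate_run("cctest") {
      deps = [ ":cctest" ]         # hypothetical test target
      isolate = "cctest.isolate"   # hypothetical .isolate file
    }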

108
deps/v8/gni/v8.gni

@ -0,0 +1,108 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
declare_args() {
# Indicate if valgrind was fetched as a custom deps to make it available on
# swarming.
v8_has_valgrind = false
# Indicate if gcmole was fetched as a hook to make it available on swarming.
v8_gcmole = false
# Turns on compiler optimizations in V8 in Debug build.
v8_optimized_debug = true
# Support for backtrace_symbols on linux.
v8_enable_backtrace = ""
# Enable the snapshot feature, for fast context creation.
# http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
v8_use_snapshot = true
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
}
if (v8_use_external_startup_data == "") {
# If not specified as a gn arg, use external startup data by default if
# a snapshot is used and if we're not on ios.
v8_use_external_startup_data = v8_use_snapshot && !is_ios
}
if (v8_enable_backtrace == "") {
v8_enable_backtrace = is_debug && !v8_optimized_debug
}
###############################################################################
# Templates
#
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
v8_path_prefix = get_path_info("../", "abspath")
# Common configs to remove or add in all v8 targets.
v8_remove_configs = [ "//build/config/compiler:chromium_code" ]
v8_add_configs = [
"//build/config/compiler:no_chromium_code",
v8_path_prefix + ":features",
v8_path_prefix + ":toolchain",
]
if (is_debug && !v8_optimized_debug) {
v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
v8_add_configs += [ "//build/config/compiler:no_optimize" ]
} else {
v8_remove_configs += [ "//build/config/compiler:default_optimization" ]
# TODO(crbug.com/621335) Rework this so that we don't have the confusion
# between "optimize_speed" and "optimize_max".
if (is_posix && !is_android && !using_sanitizer) {
v8_add_configs += [ "//build/config/compiler:optimize_speed" ]
} else {
v8_add_configs += [ "//build/config/compiler:optimize_max" ]
}
}
if (is_posix && v8_enable_backtrace) {
v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
# All templates should be kept in sync.
template("v8_source_set") {
source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
}
template("v8_executable") {
executable(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
if (is_linux) {
# For enabling ASLR.
ldflags = [ "-pie" ]
}
}
}
template("v8_component") {
component(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
}
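
A minimal sketch of using one of these wrappers; the target name, sources and config label are hypothetical:

    v8_source_set("example_component") {
      sources = [
        "example.cc",
        "example.h",
      ]
      # The wrapper then removes v8_remove_configs and adds v8_add_configs
      # on top of the configs listed here.
      configs = [ ":internal_config" ]
    }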

0
deps/v8/build/OWNERS → deps/v8/gypfiles/OWNERS

0
deps/v8/build/README.txt → deps/v8/gypfiles/README.txt

14
deps/v8/build/all.gyp → deps/v8/gypfiles/all.gyp

@ -8,11 +8,7 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
'../test/fuzzer/fuzzer.gyp:*',
'../test/unittests/unittests.gyp:*',
],
'conditions': [
['component!="shared_library"', {
@ -20,12 +16,20 @@
'../tools/parser-shell.gyp:parser-shell',
],
}],
# These items don't compile for Android on Mac.
['host_os!="mac" or OS!="android"', {
'dependencies': [
'../samples/samples.gyp:*',
'../test/cctest/cctest.gyp:*',
'../test/fuzzer/fuzzer.gyp:*',
'../test/unittests/unittests.gyp:*',
],
}],
['test_isolation_mode != "noop"', {
'dependencies': [
'../test/bot_default.gyp:*',
'../test/benchmarks/benchmarks.gyp:*',
'../test/default.gyp:*',
'../test/ignition.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',
'../test/mjsunit/mjsunit.gyp:*',

0
deps/v8/build/config/win/msvs_dependencies.isolate → deps/v8/gypfiles/config/win/msvs_dependencies.isolate

0
deps/v8/build/coverage_wrapper.py → deps/v8/gypfiles/coverage_wrapper.py

0
deps/v8/build/detect_v8_host_arch.py → deps/v8/gypfiles/detect_v8_host_arch.py

0
deps/v8/build/download_gold_plugin.py → deps/v8/gypfiles/download_gold_plugin.py

0
deps/v8/build/features.gypi → deps/v8/gypfiles/features.gypi

1
deps/v8/build/get_landmines.py → deps/v8/gypfiles/get_landmines.py

@ -27,6 +27,7 @@ def main():
print 'Switching to pinned msvs toolchain.'
print 'Clobbering to hopefully resolve problem with mksnapshot'
print 'Clobber after ICU roll.'
print 'Clobber after Android NDK update.'
return 0

6
deps/v8/build/gyp_environment.py → deps/v8/gypfiles/gyp_environment.py

@ -31,6 +31,7 @@ def apply_gyp_environment(file_path=None):
supported_vars = ( 'V8_GYP_FILE',
'V8_GYP_SYNTAX_CHECK',
'GYP_DEFINES',
'GYP_GENERATORS',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATOR_OUTPUT', )
for var in supported_vars:
@ -51,4 +52,9 @@ def set_environment():
# Update the environment based on v8.gyp_env
gyp_env_path = os.path.join(os.path.dirname(V8_ROOT), 'v8.gyp_env')
apply_gyp_environment(gyp_env_path)
if not os.environ.get('GYP_GENERATORS'):
# Default to ninja on all platforms.
os.environ['GYP_GENERATORS'] = 'ninja'
vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()

8
deps/v8/build/gyp_v8 → deps/v8/gypfiles/gyp_v8

@ -43,7 +43,7 @@ import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
v8_root = os.path.abspath(os.path.join(script_dir, os.pardir))
sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
sys.path.insert(0, os.path.join(v8_root, 'tools', 'gyp', 'pylib'))
import gyp
# Add paths so that pymod_do_main(...) can import files.
@ -90,7 +90,7 @@ def additional_include_files(args=[]):
result.append(path)
# Always include standalone.gypi
AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
AddInclude(os.path.join(v8_root, 'gypfiles', 'standalone.gypi'))
# Optionally add supplemental .gypi files if present.
supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@ -118,6 +118,10 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
if int(os.environ.get('GYP_CHROMIUM_NO_ACTION', 0)):
print 'Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.'
sys.exit(0)
gyp_environment.set_environment()
# This could give false positives since it doesn't actually do real option

0
deps/v8/build/gyp_v8.py → deps/v8/gypfiles/gyp_v8.py

6
deps/v8/build/isolate.gypi → deps/v8/gypfiles/isolate.gypi

@ -17,7 +17,7 @@
# 'foo_test',
# ],
# 'includes': [
# '../build/isolate.gypi',
# '../gypfiles/isolate.gypi',
# ],
# 'sources': [
# 'foo_test.isolate',
@ -73,15 +73,13 @@
'--config-variable', 'cfi_vptr=<(cfi_vptr)',
'--config-variable', 'gcmole=<(gcmole)',
'--config-variable', 'has_valgrind=<(has_valgrind)',
'--config-variable', 'icu_use_data_file_flag=0',
'--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)',
'--config-variable', 'v8_separate_ignition_snapshot=<(v8_separate_ignition_snapshot)',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],

0
deps/v8/build/landmine_utils.py → deps/v8/gypfiles/landmine_utils.py

2
deps/v8/build/landmines.py → deps/v8/gypfiles/landmines.py

@ -198,7 +198,7 @@ def process_options():
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
default=[os.path.join(SRC_DIR, 'gypfiles', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')

0
deps/v8/build/mac/asan.gyp → deps/v8/gypfiles/mac/asan.gyp

59
deps/v8/gypfiles/set_clang_warning_flags.gypi

@ -0,0 +1,59 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is meant to be included to set clang-specific compiler flags.
# To use this the following variable can be defined:
# clang_warning_flags: list: Compiler flags to pass to clang.
# clang_warning_flags_unset: list: Compiler flags to not pass to clang.
#
# Only use this in third-party code. In chromium_code, fix your code to not
# warn instead!
#
# Note that the gypi file is included in target_defaults, so it does not need
# to be explicitly included.
#
# Warning flags set by this will be used on all platforms. If you want to set
# warning flags on only some platforms, you have to do so manually.
#
# To use this, create a gyp target with the following form:
# {
# 'target_name': 'my_target',
# 'variables': {
# 'clang_warning_flags': ['-Wno-awesome-warning'],
# 'clang_warning_flags_unset': ['-Wpreviously-set-flag'],
# }
# }
{
'variables': {
'clang_warning_flags_unset%': [], # Provide a default value.
},
'conditions': [
['clang==1', {
# This uses >@ instead of @< to also see clang_warning_flags set in
# targets directly, not just the clang_warning_flags in target_defaults.
'cflags': [ '>@(clang_warning_flags)' ],
'cflags!': [ '>@(clang_warning_flags_unset)' ],
'xcode_settings': {
'WARNING_CFLAGS': ['>@(clang_warning_flags)'],
'WARNING_CFLAGS!': ['>@(clang_warning_flags_unset)'],
},
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': [ '>@(clang_warning_flags)' ],
'AdditionalOptions!': [ '>@(clang_warning_flags_unset)' ],
},
},
}],
['clang==0 and host_clang==1', {
'target_conditions': [
['_toolset=="host"', {
'cflags': [ '>@(clang_warning_flags)' ],
'cflags!': [ '>@(clang_warning_flags_unset)' ],
}],
],
}],
],
}

0
deps/v8/build/shim_headers.gypi → deps/v8/gypfiles/shim_headers.gypi

250
deps/v8/build/standalone.gypi → deps/v8/gypfiles/standalone.gypi

@ -49,35 +49,70 @@
'variables': {
'variables': {
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
OS=="netbsd" or OS=="mac" or OS=="qnx" or OS=="aix"', {
# This handles the Unix platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac" and OS!="aix"
'host_arch%': 'ia32',
}],
],
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
OS=="netbsd" or OS=="mac" or OS=="qnx" or OS=="aix"', {
# This handles the Unix platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac" and OS!="aix"
'host_arch%': 'ia32',
}],
],
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
# By default we build against a stable sysroot image to avoid
# depending on the packages installed on the local machine. Set this
# to 0 to build against locally installed headers and libraries (e.g.
# if packaging for a linux distro)
'use_sysroot%': 1,
},
'host_arch%': '<(host_arch)',
'target_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'use_sysroot%': '<(use_sysroot)',
'base_dir%': '<!(cd <(DEPTH) && python -c "import os; print os.getcwd()")',
# Instrument for code coverage and use coverage wrapper to exclude some
# files. Uses gcov if clang=0 is set explicitly. Otherwise,
# sanitizer_coverage must be set too.
'coverage%': 0,
# Default sysroot if no sysroot can be provided.
'sysroot%': '',
'conditions': [
# The system root for linux builds.
['OS=="linux" and use_sysroot==1', {
'conditions': [
['target_arch=="arm"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_arm-sysroot',
}],
['target_arch=="x64"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_amd64-sysroot',
}],
['target_arch=="ia32"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_i386-sysroot',
}],
['target_arch=="mipsel"', {
'sysroot%': '<!(cd <(DEPTH) && pwd -P)/build/linux/debian_wheezy_mips-sysroot',
}],
],
}], # OS=="linux" and use_sysroot==1
],
},
'base_dir%': '<(base_dir)',
'host_arch%': '<(host_arch)',
'target_arch%': '<(target_arch)',
'v8_target_arch%': '<(target_arch)',
'coverage%': '<(coverage)',
'sysroot%': '<(sysroot)',
'asan%': 0,
'lsan%': 0,
'msan%': 0,
@ -86,12 +121,19 @@
# also controls coverage granularity (1 for function-level, 2 for
# block-level, 3 for edge-level).
'sanitizer_coverage%': 0,
# Use dynamic libraries instrumented by one of the sanitizers
# instead of the standard system libraries. Set this flag to download
# prebuilt binaries from GCS.
'use_prebuilt_instrumented_libraries%': 0,
# Use libc++ (buildtools/third_party/libc++ and
# buildtools/third_party/libc++abi) instead of stdlibc++ as standard
# library. This is intended to be used for instrumented builds.
'use_custom_libcxx%': 0,
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
'make_clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
'use_lto%': 0,
@ -112,11 +154,11 @@
'use_goma%': 0,
'gomadir%': '',
# Check if valgrind directories are present.
'has_valgrind%': '<!pymod_do_main(has_valgrind)',
'test_isolation_mode%': 'noop',
# By default, use ICU data file (icudtl.dat).
'icu_use_data_file_flag%': 1,
'conditions': [
# Set default gomadir.
['OS=="win"', {
@ -134,7 +176,7 @@
# are using a custom toolchain and need to control -B in ldflags.
# Do not use 32-bit gold on 32-bit hosts as it runs out address space
# for component=static_library builds.
['(OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))', {
['((OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))) or (OS=="linux" and target_arch=="mipsel")', {
'linux_use_bundled_gold%': 1,
}, {
'linux_use_bundled_gold%': 0,
@ -143,6 +185,7 @@
},
'base_dir%': '<(base_dir)',
'clang_dir%': '<(clang_dir)',
'make_clang_dir%': '<(make_clang_dir)',
'host_arch%': '<(host_arch)',
'host_clang%': '<(host_clang)',
'target_arch%': '<(target_arch)',
@ -155,6 +198,7 @@
'msan%': '<(msan)',
'tsan%': '<(tsan)',
'sanitizer_coverage%': '<(sanitizer_coverage)',
'use_prebuilt_instrumented_libraries%': '<(use_prebuilt_instrumented_libraries)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
'linux_use_bundled_gold%': '<(linux_use_bundled_gold)',
'use_lto%': '<(use_lto)',
@ -164,7 +208,8 @@
'test_isolation_mode%': '<(test_isolation_mode)',
'fastbuild%': '<(fastbuild)',
'coverage%': '<(coverage)',
'has_valgrind%': '<(has_valgrind)',
'sysroot%': '<(sysroot)',
'icu_use_data_file_flag%': '<(icu_use_data_file_flag)',
# Add a simple extra library solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@ -194,12 +239,12 @@
# their own default value.
'v8_use_external_startup_data%': 1,
# Use a separate ignition snapshot file in standalone builds.
'v8_separate_ignition_snapshot': 1,
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
# Relative path to inspector.gyp from this file.
'inspector_gyp_path': '../src/v8-inspector/inspector.gyp',
'conditions': [
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
@ -211,6 +256,18 @@
}, {
'want_separate_host_toolset': 0,
}],
['(v8_target_arch=="arm" and host_arch!="arm") or \
(v8_target_arch=="arm64" and host_arch!="arm64") or \
(v8_target_arch=="mipsel" and host_arch!="mipsel") or \
(v8_target_arch=="mips64el" and host_arch!="mips64el") or \
(v8_target_arch=="mips" and host_arch!="mips") or \
(v8_target_arch=="mips64" and host_arch!="mips64") or \
(v8_target_arch=="x64" and host_arch!="x64") or \
(OS=="android" or OS=="qnx")', {
'want_separate_host_toolset_mkpeephole': 1,
}, {
'want_separate_host_toolset_mkpeephole': 0,
}],
['OS == "win"', {
'os_posix%': 0,
}, {
@ -261,50 +318,60 @@
# because it is used at different levels in the GYP files.
'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
# Version of the NDK. Used to ensure full rebuilds on NDK rolls.
'android_ndk_version%': 'r11c',
'host_os%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')",
'os_folder_name%': "<!(uname -s | sed -e 's/Linux/linux/;s/Darwin/darwin/')",
},
# Copy conditionally-set variables out one scope.
'android_ndk_root%': '<(android_ndk_root)',
'android_ndk_version%': '<(android_ndk_version)',
'host_os%': '<(host_os)',
'os_folder_name%': '<(os_folder_name)',
'conditions': [
['target_arch == "ia32"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_toolchain%': '<(android_ndk_root)/toolchains/x86-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'x86',
'android_target_platform%': '16',
'arm_version%': 'default',
}],
['target_arch == "x64"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_toolchain%': '<(android_ndk_root)/toolchains/x86_64-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'x86_64',
'android_target_platform%': '21',
'arm_version%': 'default',
}],
['target_arch=="arm"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_toolchain%': '<(android_ndk_root)/toolchains/arm-linux-androideabi-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'arm',
'android_target_platform%': '16',
'arm_version%': 7,
}],
['target_arch == "arm64"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_toolchain%': '<(android_ndk_root)/toolchains/aarch64-linux-android-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'arm64',
'android_target_platform%': '21',
'arm_version%': 'default',
}],
['target_arch == "mipsel"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_toolchain%': '<(android_ndk_root)/toolchains/mipsel-linux-android-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'mips',
'android_target_platform%': '16',
'arm_version%': 'default',
}],
['target_arch == "mips64el"', {
'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(host_os)-<(android_host_arch)/bin',
'android_toolchain%': '<(android_ndk_root)/toolchains/mips64el-linux-android-4.9/prebuilt/<(os_folder_name)-<(android_host_arch)/bin',
'android_target_arch%': 'mips64',
'android_target_platform%': '21',
'arm_version%': 'default',
}],
],
},
# Copy conditionally-set variables out one scope.
'android_ndk_version%': '<(android_ndk_version)',
'android_target_arch%': '<(android_target_arch)',
'android_target_platform%': '<(android_target_platform)',
'android_toolchain%': '<(android_toolchain)',
@ -351,6 +418,12 @@
'android_libcpp_library': 'c++_static',
}], # OS=="android"
['host_clang==1', {
'conditions':[
['OS=="android"', {
'host_ld': '<!(which ld)',
'host_ranlib': '<!(which ranlib)',
}],
],
'host_cc': '<(clang_dir)/bin/clang',
'host_cxx': '<(clang_dir)/bin/clang++',
}, {
@ -373,19 +446,23 @@
# fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
# detection
'mips_fpu_mode%': 'fp32',
# Indicates if gcmole tools are downloaded by a hook.
'gcmole%': 0,
},
'target_defaults': {
'variables': {
'v8_code%': '<(v8_code)',
'clang_warning_flags': [
# TODO(thakis): https://crbug.com/604888
'-Wno-undefined-var-template',
# TODO(yangguo): issue 5258
'-Wno-nonportable-include-path',
],
'conditions':[
['OS=="android"', {
'host_os%': '<(host_os)',
}],
],
},
'includes': [ 'set_clang_warning_flags.gypi', ],
'default_configuration': 'Debug',
'configurations': {
'DebugBaseCommon': {
@ -431,26 +508,9 @@
# things when their commandline changes). Nothing should ever read this
# define.
'defines': ['CR_CLANG_REVISION=<!(python <(DEPTH)/tools/clang/scripts/update.py --print-revision)'],
'conditions': [
['host_clang==1', {
'target_conditions': [
['_toolset=="host"', {
'cflags+': [
'-Wno-format-pedantic',
],
}],
],
}],
['clang==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags+': [
'-Wno-format-pedantic',
],
}],
],
}],
],
}],
['clang==1 and target_arch=="ia32"', {
'cflags': ['-mstack-alignment=16', '-mstackrealign'],
}],
['fastbuild!=0', {
'conditions': [
@ -605,6 +665,11 @@
}],
],
}],
['use_prebuilt_instrumented_libraries==1', {
'dependencies': [
'<(DEPTH)/third_party/instrumented_libraries/instrumented_libraries.gyp:prebuilt_instrumented_libraries',
],
}],
['use_custom_libcxx==1', {
'dependencies': [
'<(DEPTH)/buildtools/third_party/libc++/libc++.gyp:libcxx_proxy',
@ -637,6 +702,18 @@
'-B<(base_dir)/third_party/binutils/Linux_x64/Release/bin',
],
}],
['sysroot!="" and clang==1', {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'--sysroot=<(sysroot)',
],
'ldflags': [
'--sysroot=<(sysroot)',
'<!(<(DEPTH)/build/linux/sysroot_ld_path.sh <(sysroot))',
],
}]]
}],
],
},
}],
@ -660,7 +737,7 @@
],
},
'dependencies': [
'<(DEPTH)/build/mac/asan.gyp:asan_dynamic_runtime',
'<(DEPTH)/gypfiles/mac/asan.gyp:asan_dynamic_runtime',
],
'target_conditions': [
['_type!="static_library"', {
@ -690,11 +767,9 @@
'-Wall',
'<(werror)',
'-Wno-unused-parameter',
'-Wno-long-long',
'-pthread',
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
'-Wmissing-field-initializers',
'-Wno-gnu-zero-variadic-macro-arguments',
],
'cflags_cc': [
@ -932,6 +1007,40 @@
}],
],
},
'conditions': [
['clang==1', {
'VCCLCompilerTool': {
'AdditionalOptions': [
# Don't warn about unused function parameters.
# (This is also used on other platforms.)
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization
# pattern.
'-Wno-missing-field-initializers',
# TODO(hans): Make this list shorter eventually, http://crbug.com/504657
'-Qunused-arguments', # http://crbug.com/504658
'-Wno-microsoft-enum-value', # http://crbug.com/505296
'-Wno-unknown-pragmas', # http://crbug.com/505314
'-Wno-microsoft-cast', # http://crbug.com/550065
],
},
}],
['clang==1 and MSVS_VERSION == "2013"', {
'VCCLCompilerTool': {
'AdditionalOptions': [
'-fmsc-version=1800',
],
},
}],
['clang==1 and MSVS_VERSION == "2015"', {
'VCCLCompilerTool': {
'AdditionalOptions': [
'-fmsc-version=1900',
],
},
}],
],
},
},
}], # OS=="win"
@ -984,6 +1093,13 @@
'CLANG_CXX_LANGUAGE_STANDARD': 'c++11', # -std=c++11
},
'conditions': [
['clang_xcode==0', {
'xcode_settings': {
'CC': '<(clang_dir)/bin/clang',
'LDPLUSPLUS': '<(clang_dir)/bin/clang++',
'CLANG_CXX_LIBRARY': 'libc++'
},
}],
['v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el"', {
'xcode_settings': {'WARNING_CFLAGS': ['-Wshorten-64-to-32']},
@ -1002,7 +1118,6 @@
'target_defaults': {
'defines': [
'ANDROID',
'V8_ANDROID_LOG_STDOUT',
],
'configurations': {
'Release': {
@ -1039,6 +1154,7 @@
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
'ANDROID_NDK_VERSION=<(android_ndk_version)',
],
'ldflags!': [
'-pthread', # Not supported by Android toolchain.
@ -1188,8 +1304,12 @@
# Hardcode the compiler names in the Makefile so that
# it won't depend on the environment at make time.
'make_global_settings': [
['LD', '<!(/bin/echo -n <(android_toolchain)/../*/bin/ld)'],
['RANLIB', '<!(/bin/echo -n <(android_toolchain)/../*/bin/ranlib)'],
['CC', '<!(/bin/echo -n <(android_toolchain)/*-gcc)'],
['CXX', '<!(/bin/echo -n <(android_toolchain)/*-g++)'],
['LD.host', '<(host_ld)'],
['RANLIB.host', '<(host_ranlib)'],
['CC.host', '<(host_cc)'],
['CXX.host', '<(host_cxx)'],
],
@ -1261,10 +1381,10 @@
['coverage==1', {
# Wrap goma with coverage wrapper.
'make_global_settings': [
['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py <(gomadir)/gomacc'],
['CC_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
['CXX_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
['CC.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
['CXX.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py <(gomadir)/gomacc'],
],
}, {
# Use only goma wrapper.
@ -1281,10 +1401,10 @@
['coverage==1', {
# Use only coverage wrapper.
'make_global_settings': [
['CC_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CXX_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CC.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CXX.host_wrapper', '<(base_dir)/build/coverage_wrapper.py'],
['CC_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
['CXX_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
['CC.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
['CXX.host_wrapper', '<(base_dir)/gypfiles/coverage_wrapper.py'],
],
}],
],

99
deps/v8/build/toolchain.gypi → deps/v8/gypfiles/toolchain.gypi

@ -37,16 +37,11 @@
'tsan%': 0,
'ubsan%': 0,
'ubsan_vptr%': 0,
'has_valgrind%': 0,
'coverage%': 0,
'v8_target_arch%': '<(target_arch)',
'v8_host_byteorder%': '<!(python -c "import sys; print sys.byteorder")',
'force_dynamic_crt%': 0,
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
# for NaCl JIT support. The nacl_target_arch variable provides
# the 'true' target arch for places in this file that need it.
# TODO(bradchen): get rid of nacl_target_arch when someday
# NaCl V8 builds stop using the ARM simulator
'nacl_target_arch%': 'none', # must be set externally
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
@ -62,6 +57,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
# Print to stdout on Android.
'v8_android_log_stdout%': 0,
# Force disable libstdc++ debug mode.
'disable_glibcxx_debug%': 0,
@ -74,16 +72,14 @@
'v8_no_strict_aliasing%': 0,
# Chrome needs this definition unconditionally. For standalone V8 builds,
# it's handled in build/standalone.gypi.
# it's handled in gypfiles/standalone.gypi.
'want_separate_host_toolset%': 1,
'want_separate_host_toolset_mkpeephole%': 1,
# Toolset the d8 binary should be compiled for. Possible values are 'host'
# and 'target'. If you want to run v8 tests, it needs to be set to 'target'.
# Toolset the shell binary should be compiled for. Possible values are
# 'host' and 'target'.
# The setting is ignored if want_separate_host_toolset is 0.
'v8_toolset_for_d8%': 'target',
# Control usage of a separate ignition snapshot file.
'v8_separate_ignition_snapshot%': 0,
'v8_toolset_for_shell%': 'target',
'host_os%': '<(OS)',
'werror%': '-Werror',
@ -109,7 +105,7 @@
# are using a custom toolchain and need to control -B in ldflags.
# Do not use 32-bit gold on 32-bit hosts as it runs out of address space
# for component=static_library builds.
['OS=="linux" and (target_arch=="x64" or target_arch=="arm")', {
['((OS=="linux" or OS=="android") and (target_arch=="x64" or target_arch=="arm" or (target_arch=="ia32" and host_arch=="x64"))) or (OS=="linux" and target_arch=="mipsel")', {
'linux_use_bundled_gold%': 1,
}, {
'linux_use_bundled_gold%': 0,
@ -135,6 +131,9 @@
# Link-Time Optimizations
'use_lto%': 0,
# Indicates if gcmole tools are downloaded by a hook.
'gcmole%': 0,
},
'conditions': [
['host_arch=="ia32" or host_arch=="x64" or \
@ -362,28 +361,47 @@
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64" or v8_target_arch=="mips64el") \
and v8_target_arch==target_arch', {
['v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'target_conditions': [
['_toolset=="target"', {
# Target built with a Mips CXX compiler.
'variables': {
'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
},
'conditions': [
['ldso_path!=""', {
'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
}],
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
['v8_target_arch==target_arch', {
# Target built with a Mips CXX compiler.
'variables': {
'ldso_path%': '<!(/bin/echo -n $LDSO_PATH)',
'ld_r_path%': '<!(/bin/echo -n $LD_R_PATH)',
},
'conditions': [
['ldso_path!=""', {
'ldflags': ['-Wl,--dynamic-linker=<(ldso_path)'],
}],
['ld_r_path!=""', {
'ldflags': ['-Wl,--rpath=<(ld_r_path)'],
}],
[ 'clang==1', {
'cflags': ['-integrated-as'],
}],
['OS!="mac"', {
'defines': ['_MIPS_TARGET_HW',],
}, {
'defines': ['_MIPS_TARGET_SIMULATOR',],
}],
],
}, {
'defines': ['_MIPS_TARGET_SIMULATOR',],
}],
[ 'clang==1', {
'cflags': ['-integrated-as'],
],
}], #'_toolset=="target"
['_toolset=="host"', {
'conditions': [
['v8_target_arch==target_arch and OS!="mac"', {
'defines': ['_MIPS_TARGET_HW',],
}, {
'defines': ['_MIPS_TARGET_SIMULATOR',],
}],
],
}],
}], #'_toolset=="host"
],
}],
['v8_target_arch=="mips"', {
@ -671,6 +689,9 @@
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
'defines': [
'FPU_MODE_FP32',
],
'cflags!': ['-mfp64', '-mfpxx'],
'conditions': [
[ 'clang==0', {
@ -1024,7 +1045,7 @@
}],
['_toolset=="target"', {
'conditions': [
['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
['target_cxx_is_biarch==1', {
'conditions': [
['host_arch=="s390" or host_arch=="s390x"', {
'cflags': [ '-m31' ],
@ -1064,6 +1085,11 @@
}],
],
}],
['OS=="android" and v8_android_log_stdout==1', {
'defines': [
'V8_ANDROID_LOG_STDOUT',
],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="qnx" or OS=="aix"', {
'conditions': [
@ -1085,6 +1111,7 @@
'defines': [
# Support for malloc(0)
'_LINUX_SOURCE_COMPAT=1',
'__STDC_FORMAT_MACROS',
'_ALL_SOURCE=1'],
'conditions': [
[ 'v8_target_arch=="ppc"', {
@ -1092,7 +1119,7 @@
}],
[ 'v8_target_arch=="ppc64"', {
'cflags': [ '-maix64' ],
'ldflags': [ '-maix64' ],
'ldflags': [ '-maix64 -Wl,-bbigtoc' ],
}],
],
}],
@ -1186,9 +1213,8 @@
'-ffunction-sections',
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
# Don't use -O3 with sanitizers.
['nacl_target_arch=="none" and asan==0 and msan==0 and lsan==0 \
['asan==0 and msan==0 and lsan==0 \
and tsan==0 and ubsan==0 and ubsan_vptr==0', {
'cflags': ['-O3'],
'cflags!': ['-O2'],
@ -1305,9 +1331,8 @@
'<(wno_array_bounds)',
],
'conditions': [
# TODO(crbug.com/272548): Avoid -O3 in NaCl
# Don't use -O3 with sanitizers.
['nacl_target_arch=="none" and asan==0 and msan==0 and lsan==0 \
['asan==0 and msan==0 and lsan==0 \
and tsan==0 and ubsan==0 and ubsan_vptr==0', {
'cflags': ['-O3'],
'cflags!': ['-O2'],

235
deps/v8/build/vs_toolchain.py → deps/v8/gypfiles/vs_toolchain.py

@ -4,39 +4,43 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import json
import os
import pipes
import shutil
import subprocess
import sys
import vs_toolchain
script_dir = os.path.dirname(os.path.realpath(__file__))
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(1, os.path.join(chrome_src, 'tools'))
sys.path.insert(0, os.path.join(chrome_src, 'build', 'gyp', 'pylib'))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
json_data_file = os.path.join(script_dir, 'win_toolchain.json')
import gyp
# Use MSVS2013 as the default toolchain.
CURRENT_DEFAULT_TOOLCHAIN_VERSION = '2013'
def SetEnvironmentAndGetRuntimeDllDirs():
"""Sets up os.environ to use the depot_tools VS toolchain with gyp, and
returns the location of the VS runtime DLLs so they can be copied into
the output directory after gyp generation.
"""
vs2013_runtime_dll_dirs = None
vs_runtime_dll_dirs = None
depot_tools_win_toolchain = \
bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', '1')))
# When running on a non-Windows host, only do this if the SDK has explicitly
# been downloaded before (in which case json_data_file will exist).
if ((sys.platform in ('win32', 'cygwin') or os.path.exists(json_data_file))
and depot_tools_win_toolchain):
if not os.path.exists(json_data_file):
if ShouldUpdateToolchain():
Update()
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
@ -50,7 +54,7 @@ def SetEnvironmentAndGetRuntimeDllDirs():
# TODO(scottmg): The order unfortunately matters in these. They should be
# split into separate keys for x86 and x64. (See CopyVsRuntimeDlls call
# below). http://crbug.com/345992
vs2013_runtime_dll_dirs = toolchain_data['runtime_dirs']
vs_runtime_dll_dirs = toolchain_data['runtime_dirs']
os.environ['GYP_MSVS_OVERRIDE_PATH'] = toolchain
os.environ['GYP_MSVS_VERSION'] = version
@ -65,30 +69,100 @@ def SetEnvironmentAndGetRuntimeDllDirs():
os.environ['WINDOWSSDKDIR'] = win_sdk
os.environ['WDK_DIR'] = wdk
# Include the VS runtime in the PATH in case it's not machine-installed.
runtime_path = ';'.join(vs2013_runtime_dll_dirs)
os.environ['PATH'] = runtime_path + ';' + os.environ['PATH']
return vs2013_runtime_dll_dirs
runtime_path = os.path.pathsep.join(vs_runtime_dll_dirs)
os.environ['PATH'] = runtime_path + os.path.pathsep + os.environ['PATH']
elif sys.platform == 'win32' and not depot_tools_win_toolchain:
if not 'GYP_MSVS_OVERRIDE_PATH' in os.environ:
os.environ['GYP_MSVS_OVERRIDE_PATH'] = DetectVisualStudioPath()
if not 'GYP_MSVS_VERSION' in os.environ:
os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
return vs_runtime_dll_dirs
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
raise Exception('The python library _winreg not found.')
def GetVisualStudioVersion():
"""Return GYP_MSVS_VERSION of Visual Studio.
"""
return os.environ.get('GYP_MSVS_VERSION', CURRENT_DEFAULT_TOOLCHAIN_VERSION)
def DetectVisualStudioPath():
"""Return path to the GYP_MSVS_VERSION of Visual Studio.
"""
# Note that this code is used from
# build/toolchain/win/setup_toolchain.py as well.
version_as_year = GetVisualStudioVersion()
year_to_version = {
'2013': '12.0',
'2015': '14.0',
}
if version_as_year not in year_to_version:
raise Exception(('Visual Studio version %s (from GYP_MSVS_VERSION)'
' not supported. Supported versions are: %s') % (
version_as_year, ', '.join(year_to_version.keys())))
version = year_to_version[version_as_year]
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version]
for key in keys:
path = _RegistryGetValue(key, 'InstallDir')
if not path:
continue
path = os.path.normpath(os.path.join(path, '..', '..'))
return path
raise Exception(('Visual Studio Version %s (from GYP_MSVS_VERSION)'
' not found.') % (version_as_year))
def _VersionNumber():
"""Gets the standard version number ('120', '140', etc.) based on
GYP_MSVS_VERSION."""
if os.environ['GYP_MSVS_VERSION'] == '2013':
vs_version = GetVisualStudioVersion()
if vs_version == '2013':
return '120'
elif os.environ['GYP_MSVS_VERSION'] == '2015':
elif vs_version == '2015':
return '140'
else:
raise ValueError('Unexpected GYP_MSVS_VERSION')
def _CopyRuntimeImpl(target, source):
def _CopyRuntimeImpl(target, source, verbose=True):
"""Copy |source| to |target| if it doesn't already exist or if it
needs to be updated.
"""
if (os.path.isdir(os.path.dirname(target)) and
(not os.path.isfile(target) or
os.stat(target).st_mtime != os.stat(source).st_mtime)):
print 'Copying %s to %s...' % (source, target)
if verbose:
print 'Copying %s to %s...' % (source, target)
if os.path.exists(target):
os.unlink(target)
shutil.copy2(source, target)
@ -104,14 +178,50 @@ def _CopyRuntime2013(target_dir, source_dir, dll_pattern):
_CopyRuntimeImpl(target, source)
def _CopyRuntime2015(target_dir, source_dir, dll_pattern):
def _CopyRuntime2015(target_dir, source_dir, dll_pattern, suffix):
"""Copy both the msvcp and vccorlib runtime DLLs, only if the target doesn't
exist, but the target directory does exist."""
for file_part in ('msvcp', 'vccorlib'):
for file_part in ('msvcp', 'vccorlib', 'vcruntime'):
dll = dll_pattern % file_part
target = os.path.join(target_dir, dll)
source = os.path.join(source_dir, dll)
_CopyRuntimeImpl(target, source)
ucrt_src_dir = os.path.join(source_dir, 'api-ms-win-*.dll')
print 'Copying %s to %s...' % (ucrt_src_dir, target_dir)
for ucrt_src_file in glob.glob(ucrt_src_dir):
file_part = os.path.basename(ucrt_src_file)
ucrt_dst_file = os.path.join(target_dir, file_part)
_CopyRuntimeImpl(ucrt_dst_file, ucrt_src_file, False)
_CopyRuntimeImpl(os.path.join(target_dir, 'ucrtbase' + suffix),
os.path.join(source_dir, 'ucrtbase' + suffix))
def _CopyRuntime(target_dir, source_dir, target_cpu, debug):
"""Copy the VS runtime DLLs, only if the target doesn't exist, but the target
directory does exist. Handles VS 2013 and VS 2015."""
suffix = "d.dll" if debug else ".dll"
if GetVisualStudioVersion() == '2015':
_CopyRuntime2015(target_dir, source_dir, '%s140' + suffix, suffix)
else:
_CopyRuntime2013(target_dir, source_dir, 'msvc%s120' + suffix)
# Copy the PGO runtime library to the release directories.
if not debug and os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
'VC', 'bin')
pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
if target_cpu == "x86":
source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x86):
_CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll), source_x86)
elif target_cpu == "x64":
source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x64):
_CopyRuntimeImpl(os.path.join(target_dir, pgo_runtime_dll),
source_x64)
else:
raise NotImplementedError("Unexpected target_cpu value:" + target_cpu)
def CopyVsRuntimeDlls(output_dir, runtime_dirs):
@ -121,48 +231,19 @@ def CopyVsRuntimeDlls(output_dir, runtime_dirs):
This needs to be run after gyp has been run so that the expected target
output directories are already created.
This is used for the GYP build and gclient runhooks.
"""
x86, x64 = runtime_dirs
out_debug = os.path.join(output_dir, 'Debug')
out_debug_nacl64 = os.path.join(output_dir, 'Debug', 'x64')
out_release = os.path.join(output_dir, 'Release')
out_release_nacl64 = os.path.join(output_dir, 'Release', 'x64')
out_debug_x64 = os.path.join(output_dir, 'Debug_x64')
out_release_x64 = os.path.join(output_dir, 'Release_x64')
if os.path.exists(out_debug) and not os.path.exists(out_debug_nacl64):
os.makedirs(out_debug_nacl64)
if os.path.exists(out_release) and not os.path.exists(out_release_nacl64):
os.makedirs(out_release_nacl64)
if os.environ.get('GYP_MSVS_VERSION') == '2015':
_CopyRuntime2015(out_debug, x86, '%s140d.dll')
_CopyRuntime2015(out_release, x86, '%s140.dll')
_CopyRuntime2015(out_debug_x64, x64, '%s140d.dll')
_CopyRuntime2015(out_release_x64, x64, '%s140.dll')
_CopyRuntime2015(out_debug_nacl64, x64, '%s140d.dll')
_CopyRuntime2015(out_release_nacl64, x64, '%s140.dll')
else:
# VS2013 is the default.
_CopyRuntime2013(out_debug, x86, 'msvc%s120d.dll')
_CopyRuntime2013(out_release, x86, 'msvc%s120.dll')
_CopyRuntime2013(out_debug_x64, x64, 'msvc%s120d.dll')
_CopyRuntime2013(out_release_x64, x64, 'msvc%s120.dll')
_CopyRuntime2013(out_debug_nacl64, x64, 'msvc%s120d.dll')
_CopyRuntime2013(out_release_nacl64, x64, 'msvc%s120.dll')
# Copy the PGO runtime library to the release directories.
if os.environ.get('GYP_MSVS_OVERRIDE_PATH'):
pgo_x86_runtime_dir = os.path.join(os.environ.get('GYP_MSVS_OVERRIDE_PATH'),
'VC', 'bin')
pgo_x64_runtime_dir = os.path.join(pgo_x86_runtime_dir, 'amd64')
pgo_runtime_dll = 'pgort' + _VersionNumber() + '.dll'
source_x86 = os.path.join(pgo_x86_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x86):
_CopyRuntimeImpl(os.path.join(out_release, pgo_runtime_dll), source_x86)
source_x64 = os.path.join(pgo_x64_runtime_dir, pgo_runtime_dll)
if os.path.exists(source_x64):
_CopyRuntimeImpl(os.path.join(out_release_x64, pgo_runtime_dll),
source_x64)
_CopyRuntime(out_debug, x86, "x86", debug=True)
_CopyRuntime(out_release, x86, "x86", debug=False)
_CopyRuntime(out_debug_x64, x64, "x64", debug=True)
_CopyRuntime(out_release_x64, x64, "x64", debug=False)
def CopyDlls(target_dir, configuration, target_cpu):
@ -173,28 +254,41 @@ def CopyDlls(target_dir, configuration, target_cpu):
The debug configuration gets both the debug and release DLLs; the
release config only the latter.
This is used for the GN build.
"""
vs2013_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
if not vs2013_runtime_dll_dirs:
vs_runtime_dll_dirs = SetEnvironmentAndGetRuntimeDllDirs()
if not vs_runtime_dll_dirs:
return
x64_runtime, x86_runtime = vs2013_runtime_dll_dirs
x64_runtime, x86_runtime = vs_runtime_dll_dirs
runtime_dir = x64_runtime if target_cpu == 'x64' else x86_runtime
_CopyRuntime2013(
target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + '.dll')
_CopyRuntime(target_dir, runtime_dir, target_cpu, debug=False)
if configuration == 'Debug':
_CopyRuntime2013(
target_dir, runtime_dir, 'msvc%s' + _VersionNumber() + 'd.dll')
_CopyRuntime(target_dir, runtime_dir, target_cpu, debug=True)
def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
if os.environ.get('GYP_MSVS_VERSION') == '2015':
return ['5a85cf1ce842f7cc96b9d17039a445a9dc9cf0dd']
if GetVisualStudioVersion() == '2015':
# Update 2.
return ['95ddda401ec5678f15eeed01d2bee08fcbc5ee97']
else:
# Default to VS2013.
return ['9ff97c632ae1fee0c98bcd53e71770eb3a0d8deb']
return ['03a4e939cd325d6bc5216af41b92d02dda1366a6']
def ShouldUpdateToolchain():
"""Check if the toolchain should be upgraded."""
if not os.path.exists(json_data_file):
return True
with open(json_data_file, 'r') as tempf:
toolchain_data = json.load(tempf)
version = toolchain_data['version']
env_version = GetVisualStudioVersion()
# If there's a mismatch between the version set in the environment and the one
# in the json file then the toolchain should be updated.
return version != env_version
def Update(force=False):
@ -214,6 +308,9 @@ def Update(force=False):
depot_tools_win_toolchain):
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
# Necessary so that get_toolchain_if_necessary.py will put the VS toolkit
# in the correct directory.
os.environ['GYP_MSVS_VERSION'] = GetVisualStudioVersion()
get_toolchain_args = [
sys.executable,
os.path.join(depot_tools_path,
@ -228,6 +325,12 @@ def Update(force=False):
return 0
def NormalizePath(path):
while path.endswith("\\"):
path = path[:-1]
return path
def GetToolchainDir():
"""Gets location information about the current toolchain (must have been
previously updated by 'update'). This is used for the GN build."""
@ -235,7 +338,7 @@ def GetToolchainDir():
# If WINDOWSSDKDIR is not set, search the default SDK path and set it.
if not 'WINDOWSSDKDIR' in os.environ:
default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\8.1'
default_sdk_path = 'C:\\Program Files (x86)\\Windows Kits\\10'
if os.path.isdir(default_sdk_path):
os.environ['WINDOWSSDKDIR'] = default_sdk_path
@ -245,11 +348,11 @@ vs_version = "%s"
wdk_dir = "%s"
runtime_dirs = "%s"
''' % (
os.environ['GYP_MSVS_OVERRIDE_PATH'],
os.environ['WINDOWSSDKDIR'],
os.environ['GYP_MSVS_VERSION'],
os.environ.get('WDK_DIR', ''),
';'.join(runtime_dll_dirs or ['None']))
NormalizePath(os.environ['GYP_MSVS_OVERRIDE_PATH']),
NormalizePath(os.environ['WINDOWSSDKDIR']),
GetVisualStudioVersion(),
NormalizePath(os.environ.get('WDK_DIR', '')),
os.path.pathsep.join(runtime_dll_dirs or ['None']))
def main():

5
deps/v8/include/libplatform/DEPS

@ -0,0 +1,5 @@
specific_include_rules = {
"libplatform\.h": [
"+libplatform/v8-tracing.h",
],
}

9
deps/v8/include/libplatform/libplatform.h

@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
#include "libplatform/v8-tracing.h"
#include "v8-platform.h" // NOLINT(build/include)
namespace v8 {
@ -31,6 +32,14 @@ v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
*/
bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate);
/**
* Attempts to set the tracing controller for the given platform.
*
* The |platform| has to be created using |CreateDefaultPlatform|.
*/
void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller);
} // namespace platform
} // namespace v8

253
deps/v8/include/libplatform/v8-tracing.h

@ -0,0 +1,253 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_LIBPLATFORM_V8_TRACING_H_
#define V8_LIBPLATFORM_V8_TRACING_H_
#include <fstream>
#include <memory>
#include <vector>
namespace v8 {
namespace platform {
namespace tracing {
const int kTraceMaxNumArgs = 2;
class TraceObject {
public:
union ArgValue {
bool as_bool;
uint64_t as_uint;
int64_t as_int;
double as_double;
const void* as_pointer;
const char* as_string;
};
TraceObject() {}
~TraceObject();
void Initialize(char phase, const uint8_t* category_enabled_flag,
const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int num_args, const char** arg_names,
const uint8_t* arg_types, const uint64_t* arg_values,
unsigned int flags);
void UpdateDuration();
void InitializeForTesting(char phase, const uint8_t* category_enabled_flag,
const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values, unsigned int flags,
int pid, int tid, int64_t ts, int64_t tts,
uint64_t duration, uint64_t cpu_duration);
int pid() const { return pid_; }
int tid() const { return tid_; }
char phase() const { return phase_; }
const uint8_t* category_enabled_flag() const {
return category_enabled_flag_;
}
const char* name() const { return name_; }
const char* scope() const { return scope_; }
uint64_t id() const { return id_; }
uint64_t bind_id() const { return bind_id_; }
int num_args() const { return num_args_; }
const char** arg_names() { return arg_names_; }
uint8_t* arg_types() { return arg_types_; }
ArgValue* arg_values() { return arg_values_; }
unsigned int flags() const { return flags_; }
int64_t ts() { return ts_; }
int64_t tts() { return tts_; }
uint64_t duration() { return duration_; }
uint64_t cpu_duration() { return cpu_duration_; }
private:
int pid_;
int tid_;
char phase_;
const char* name_;
const char* scope_;
const uint8_t* category_enabled_flag_;
uint64_t id_;
uint64_t bind_id_;
int num_args_;
const char* arg_names_[kTraceMaxNumArgs];
uint8_t arg_types_[kTraceMaxNumArgs];
ArgValue arg_values_[kTraceMaxNumArgs];
char* parameter_copy_storage_ = nullptr;
unsigned int flags_;
int64_t ts_;
int64_t tts_;
uint64_t duration_;
uint64_t cpu_duration_;
// Disallow copy and assign
TraceObject(const TraceObject&) = delete;
void operator=(const TraceObject&) = delete;
};
class TraceWriter {
public:
TraceWriter() {}
virtual ~TraceWriter() {}
virtual void AppendTraceEvent(TraceObject* trace_event) = 0;
virtual void Flush() = 0;
static TraceWriter* CreateJSONTraceWriter(std::ostream& stream);
private:
// Disallow copy and assign
TraceWriter(const TraceWriter&) = delete;
void operator=(const TraceWriter&) = delete;
};
class TraceBufferChunk {
public:
explicit TraceBufferChunk(uint32_t seq);
void Reset(uint32_t new_seq);
bool IsFull() const { return next_free_ == kChunkSize; }
TraceObject* AddTraceEvent(size_t* event_index);
TraceObject* GetEventAt(size_t index) { return &chunk_[index]; }
uint32_t seq() const { return seq_; }
size_t size() const { return next_free_; }
static const size_t kChunkSize = 64;
private:
size_t next_free_ = 0;
TraceObject chunk_[kChunkSize];
uint32_t seq_;
// Disallow copy and assign
TraceBufferChunk(const TraceBufferChunk&) = delete;
void operator=(const TraceBufferChunk&) = delete;
};
class TraceBuffer {
public:
TraceBuffer() {}
virtual ~TraceBuffer() {}
virtual TraceObject* AddTraceEvent(uint64_t* handle) = 0;
virtual TraceObject* GetEventByHandle(uint64_t handle) = 0;
virtual bool Flush() = 0;
static const size_t kRingBufferChunks = 1024;
static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks,
TraceWriter* trace_writer);
private:
// Disallow copy and assign
TraceBuffer(const TraceBuffer&) = delete;
void operator=(const TraceBuffer&) = delete;
};
// TraceRecordMode determines how the trace buffer stores data.
enum TraceRecordMode {
// Record until the trace buffer is full.
RECORD_UNTIL_FULL,
// Record until the user ends the trace. The trace buffer is a fixed size
// and we use it as a ring buffer during recording.
RECORD_CONTINUOUSLY,
// Record until the trace buffer is full, but with a huge buffer size.
RECORD_AS_MUCH_AS_POSSIBLE,
// Echo to console. Events are discarded.
ECHO_TO_CONSOLE,
};
class TraceConfig {
public:
typedef std::vector<std::string> StringList;
static TraceConfig* CreateDefaultTraceConfig();
TraceConfig()
: enable_sampling_(false),
enable_systrace_(false),
enable_argument_filter_(false) {}
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
bool IsSamplingEnabled() const { return enable_sampling_; }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
void EnableSampling() { enable_sampling_ = true; }
void EnableSystrace() { enable_systrace_ = true; }
void EnableArgumentFilter() { enable_argument_filter_ = true; }
void AddIncludedCategory(const char* included_category);
void AddExcludedCategory(const char* excluded_category);
bool IsCategoryGroupEnabled(const char* category_group) const;
private:
TraceRecordMode record_mode_;
bool enable_sampling_ : 1;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
StringList included_categories_;
StringList excluded_categories_;
// Disallow copy and assign
TraceConfig(const TraceConfig&) = delete;
void operator=(const TraceConfig&) = delete;
};
class TracingController {
public:
enum Mode { DISABLED = 0, RECORDING_MODE };
// The pointer returned from GetCategoryGroupEnabledInternal() points to a
// value with zero or more of the following bits. Used in this class only.
// The TRACE_EVENT macros should only use the value as a bool.
// These values must be in sync with macro values in TraceEvent.h in Blink.
enum CategoryGroupEnabledFlags {
// Category group enabled for the recording mode.
ENABLED_FOR_RECORDING = 1 << 0,
// Category group enabled by SetEventCallbackEnabled().
ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
// Category group enabled to export events to ETW.
ENABLED_FOR_ETW_EXPORT = 1 << 3
};
TracingController() {}
void Initialize(TraceBuffer* trace_buffer);
const uint8_t* GetCategoryGroupEnabled(const char* category_group);
static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
uint64_t AddTraceEvent(char phase, const uint8_t* category_enabled_flag,
const char* name, const char* scope, uint64_t id,
uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values, unsigned int flags);
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle);
void StartTracing(TraceConfig* trace_config);
void StopTracing();
private:
const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
void UpdateCategoryGroupEnabledFlag(size_t category_index);
void UpdateCategoryGroupEnabledFlags();
std::unique_ptr<TraceBuffer> trace_buffer_;
std::unique_ptr<TraceConfig> trace_config_;
Mode mode_ = DISABLED;
// Disallow copy and assign
TracingController(const TracingController&) = delete;
void operator=(const TracingController&) = delete;
};
} // namespace tracing
} // namespace platform
} // namespace v8
#endif // V8_LIBPLATFORM_V8_TRACING_H_

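A minimal usage sketch (not part of this patch) of how the tracing surface above fits together with the SetTracingController() hook added to libplatform.h: a ring-buffer TraceBuffer flushing JSON to a caller-supplied stream, owned by a TracingController that is handed to the default platform. The EnableTracing() helper, the output stream, and the "v8" category name are illustrative assumptions, not code from the patch.

#include <fstream>
#include "libplatform/libplatform.h"
#include "libplatform/v8-tracing.h"

namespace vpt = v8::platform::tracing;

// |platform| must have been created with v8::platform::CreateDefaultPlatform().
void EnableTracing(v8::Platform* platform, std::ofstream& trace_file) {
  vpt::TraceWriter* writer = vpt::TraceWriter::CreateJSONTraceWriter(trace_file);
  vpt::TraceBuffer* buffer = vpt::TraceBuffer::CreateTraceBufferRingBuffer(
      vpt::TraceBuffer::kRingBufferChunks, writer);

  vpt::TracingController* controller = new vpt::TracingController();
  controller->Initialize(buffer);  // buffer now lives in the controller's unique_ptr member

  vpt::TraceConfig* config = vpt::TraceConfig::CreateDefaultTraceConfig();
  config->AddIncludedCategory("v8");  // category name is an assumption

  v8::platform::SetTracingController(platform, controller);
  controller->StartTracing(config);  // config now lives in the controller's unique_ptr member
}
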
27
deps/v8/include/v8-debug.h

@ -18,13 +18,11 @@ enum DebugEvent {
Exception = 2,
NewFunction = 3,
BeforeCompile = 4,
AfterCompile = 5,
AfterCompile = 5,
CompileError = 6,
PromiseEvent = 7,
AsyncTaskEvent = 8,
AsyncTaskEvent = 7,
};
class V8_EXPORT Debug {
public:
/**
@ -127,6 +125,8 @@ class V8_EXPORT Debug {
*/
virtual ClientData* GetClientData() const = 0;
virtual Isolate* GetIsolate() const = 0;
virtual ~EventDetails() {}
};
@ -157,9 +157,6 @@ class V8_EXPORT Debug {
static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>());
V8_DEPRECATED("Use version with an Isolate",
static bool SetDebugEventListener(
EventCallback that, Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
@ -174,8 +171,6 @@ class V8_EXPORT Debug {
// Message based interface. The message protocol is JSON.
static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
V8_DEPRECATED("Use version with an Isolate",
static void SetMessageHandler(MessageHandler handler));
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
@ -199,9 +194,6 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
static V8_DEPRECATED("Use maybe version",
Local<Value> Call(v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>()));
// TODO(dcarney): data arg should be a MaybeLocal
static MaybeLocal<Value> Call(Local<Context> context,
v8::Local<v8::Function> fun,
@ -210,8 +202,6 @@ class V8_EXPORT Debug {
/**
* Returns a mirror object for the given object.
*/
static V8_DEPRECATED("Use maybe version",
Local<Value> GetMirror(v8::Local<v8::Value> obj));
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj);
@ -247,8 +237,6 @@ class V8_EXPORT Debug {
* of this method.
*/
static void ProcessDebugMessages(Isolate* isolate);
V8_DEPRECATED("Use version with an Isolate",
static void ProcessDebugMessages());
/**
* Debugger is running in its own context which is entered while debugger
@ -258,9 +246,12 @@ class V8_EXPORT Debug {
* least one DebugEventListener or MessageHandler is set.
*/
static Local<Context> GetDebugContext(Isolate* isolate);
V8_DEPRECATED("Use version with an Isolate",
static Local<Context> GetDebugContext());
/**
* While in the debug context, this method returns the top-most non-debug
* context, if it exists.
*/
static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate);
/**
* Enable/disable LiveEdit functionality for the given Isolate

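A hedged sketch (not part of this patch) of the Isolate-taking SetDebugEventListener() overload this change keeps, combined with the EventDetails::GetIsolate() accessor added above. The EventCallback signature and the listener body are assumptions based on the surrounding header, which this hunk does not show in full.

#include "v8.h"
#include "v8-debug.h"

// Assumed from the header: typedef void (*EventCallback)(const EventDetails&).
static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  if (details.GetEvent() == v8::AfterCompile) {
    v8::Isolate* isolate = details.GetIsolate();  // accessor added by this change
    // ... e.g. report the newly compiled script to an attached debugger ...
    (void)isolate;
  }
}

void InstallDebugListener(v8::Isolate* isolate) {
  v8::Debug::SetDebugEventListener(isolate, OnDebugEvent);
}
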
4
deps/v8/include/v8-experimental.h

@ -31,13 +31,17 @@ class V8_EXPORT FastAccessorBuilder {
ValueId IntegerConstant(int int_constant);
ValueId GetReceiver();
ValueId LoadInternalField(ValueId value_id, int field_no);
ValueId LoadInternalFieldUnchecked(ValueId value_id, int field_no);
ValueId LoadValue(ValueId value_id, int offset);
ValueId LoadObject(ValueId value_id, int offset);
ValueId ToSmi(ValueId value_id);
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
LabelId MakeLabel();
void SetLabel(LabelId label_id);
void Goto(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
ValueId Call(v8::FunctionCallback callback, ValueId value_id);

6
deps/v8/include/v8-platform.h

@ -152,9 +152,9 @@ class Platform {
*/
virtual uint64_t AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
uint64_t id, uint64_t bind_id, int32_t num_args, const char** arg_names,
const uint8_t* arg_types, const uint64_t* arg_values,
unsigned int flags) {
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values, unsigned int flags) {
return 0;
}

99
deps/v8/include/v8-profiler.h

@ -46,6 +46,75 @@ template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
namespace v8 {
// TickSample captures the information collected for each sample.
struct TickSample {
// Internal profiling (with --prof + tools/$OS-tick-processor) wants to
// include the runtime function we're calling. Externally exposed tick
// samples don't care.
enum RecordCEntryFrame { kIncludeCEntryFrame, kSkipCEntryFrame };
TickSample()
: state(OTHER),
pc(nullptr),
external_callback_entry(nullptr),
frames_count(0),
has_external_callback(false),
update_stats(true) {}
/**
* Initialize a tick sample from the isolate.
* \param isolate The isolate.
* \param state Execution state.
* \param record_c_entry_frame Include or skip the runtime function.
* \param update_stats Whether to add the sample to the aggregated stats.
* \param use_simulator_reg_state When set to true and V8 is running under a
* simulator, the method will use the simulator
* register state rather than the one provided
* with |state| argument. Otherwise the method
* will use provided register |state| as is.
*/
void Init(Isolate* isolate, const v8::RegisterState& state,
RecordCEntryFrame record_c_entry_frame, bool update_stats,
bool use_simulator_reg_state = true);
/**
* Get a call stack sample from the isolate.
* \param isolate The isolate.
* \param state Register state.
* \param record_c_entry_frame Include or skip the runtime function.
* \param frames Caller allocated buffer to store stack frames.
* \param frames_limit Maximum number of frames to capture. The buffer must
* be large enough to hold the number of frames.
* \param sample_info The sample info is filled in by the function; it
* provides the number of actually captured stack frames and
* the current VM state.
* \param use_simulator_reg_state When set to true and V8 is running under a
* simulator, the method will use the simulator
* register state rather than the one provided
* with |state| argument. Otherwise the method
* will use provided register |state| as is.
* \note GetStackSample is thread and signal safe and should only be called
* when the JS thread is paused or interrupted.
* Otherwise the behavior is undefined.
*/
static bool GetStackSample(Isolate* isolate, v8::RegisterState* state,
RecordCEntryFrame record_c_entry_frame,
void** frames, size_t frames_limit,
v8::SampleInfo* sample_info,
bool use_simulator_reg_state = true);
StateTag state; // The state of the VM.
void* pc; // Instruction pointer.
union {
void* tos; // Top stack value (*sp).
void* external_callback_entry;
};
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
void* stack[kMaxFramesCount]; // Call stack.
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
bool update_stats : 1; // Whether the sample should update aggregated stats.
};
/**
* CpuProfileNode represents a node in a call graph.
*/
@ -103,7 +172,9 @@ class V8_EXPORT CpuProfileNode {
unsigned GetHitCount() const;
/** Returns function entry UID. */
unsigned GetCallUid() const;
V8_DEPRECATE_SOON(
"Use GetScriptId, GetLineNumber, and GetColumnNumber instead.",
unsigned GetCallUid() const);
/** Returns id of the node. The id is unique within the tree */
unsigned GetNodeId() const;
@ -173,13 +244,24 @@ class V8_EXPORT CpuProfile {
void Delete();
};
/**
* Interface for controlling CPU profiling. Instance of the
* profiler can be retrieved using v8::Isolate::GetCpuProfiler.
* profiler can be created using v8::CpuProfiler::New method.
*/
class V8_EXPORT CpuProfiler {
public:
/**
* Creates a new CPU profiler for the |isolate|. The isolate must be
* initialized. The profiler object must be disposed after use by calling
* the |Dispose| method.
*/
static CpuProfiler* New(Isolate* isolate);
/**
* Disposes the CPU profiler object.
*/
void Dispose();
/**
* Changes default CPU profiler sampling interval to the specified number
* of microseconds. Default interval is 1000us. This method must be called
@ -515,6 +597,11 @@ class V8_EXPORT AllocationProfile {
*/
class V8_EXPORT HeapProfiler {
public:
enum SamplingFlags {
kSamplingNoFlags = 0,
kSamplingForceGC = 1 << 0,
};
/**
* Callback function invoked for obtaining RetainedObjectInfo for
* the given JavaScript wrapper object. It is prohibited to enter V8
@ -640,7 +727,8 @@ class V8_EXPORT HeapProfiler {
* Returns false if a sampling heap profiler is already running.
*/
bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
int stack_depth = 16);
int stack_depth = 16,
SamplingFlags flags = kSamplingNoFlags);
/**
* Stops the sampling heap profile and discards the current profile.
@ -688,7 +776,6 @@ class V8_EXPORT HeapProfiler {
HeapProfiler& operator=(const HeapProfiler&);
};
/**
* Interface for providing information about embedder's objects
* held by global handles. This information is reported in two ways:
@ -703,7 +790,7 @@ class V8_EXPORT HeapProfiler {
* were not previously reported via AddObjectGroup.
*
* Thus, if an embedder wants to provide information about native
* objects for heap snapshots, he can do it in a GC prologue
* objects for heap snapshots, it can do it in a GC prologue
* handler, and / or by assigning wrapper class ids in the following way:
*
* 1. Bind a callback to class id by calling SetWrapperClassInfoProvider.

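A hedged sketch (not part of this patch) of the explicit profiler lifecycle introduced above: CpuProfiler::New() and Dispose() replace fetching the profiler from the isolate. SetSamplingInterval(), StartProfiling(), StopProfiling() and the RunWorkload() helper are assumptions here; only New(), Dispose() and CpuProfile::Delete() appear in this hunk.

#include "v8.h"
#include "v8-profiler.h"

void RunWorkload(v8::Isolate* isolate);  // hypothetical helper: runs the JS to be measured

void CollectCpuProfile(v8::Isolate* isolate, v8::Local<v8::String> title) {
  v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
  profiler->SetSamplingInterval(500);  // microseconds; the default is 1000us
  profiler->StartProfiling(title, /*record_samples=*/true);
  RunWorkload(isolate);
  v8::CpuProfile* profile = profiler->StopProfiling(title);
  if (profile != nullptr) {
    // Walk profile->GetTopDownRoot(); per the deprecation above, identify nodes
    // via GetScriptId()/GetLineNumber() rather than GetCallUid().
    profile->Delete();
  }
  profiler->Dispose();  // required with the New()/Dispose() lifecycle
}
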
25
deps/v8/include/v8-util.h

@ -95,12 +95,12 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
MapType* map, const K& key, Local<V> value) {
return NULL;
}
static MapType* MapFromWeakCallbackData(
const WeakCallbackData<V, WeakCallbackDataType>& data) {
static MapType* MapFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackDataType>& data) {
return NULL;
}
static K KeyFromWeakCallbackData(
const WeakCallbackData<V, WeakCallbackDataType>& data) {
static K KeyFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackDataType>& data) {
return K();
}
static void DisposeCallbackData(WeakCallbackDataType* data) { }
@ -205,6 +205,17 @@ class PersistentValueMapBase {
reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))));
}
/**
* Call V8::RegisterExternallyReferencedObject with the map value for the given
* key.
*/
void RegisterExternallyReferencedObject(K& key) {
DCHECK(Contains(key));
V8::RegisterExternallyReferencedObject(
reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
reinterpret_cast<internal::Isolate*>(GetIsolate()));
}
/**
* Return value for key and remove it from the map.
*/
@ -402,11 +413,11 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
private:
static void WeakCallback(
const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) {
const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
PersistentValueMap<K, V, Traits>* persistentValueMap =
Traits::MapFromWeakCallbackData(data);
K key = Traits::KeyFromWeakCallbackData(data);
Traits::MapFromWeakCallbackInfo(data);
K key = Traits::KeyFromWeakCallbackInfo(data);
Traits::Dispose(data.GetIsolate(),
persistentValueMap->Remove(key).Pass(), key);
Traits::DisposeCallbackData(data.GetParameter());

6
deps/v8/include/v8-version.h

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 1
#define V8_BUILD_NUMBER 281
#define V8_PATCH_LEVEL 82
#define V8_MINOR_VERSION 4
#define V8_BUILD_NUMBER 500
#define V8_PATCH_LEVEL 27
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

887
deps/v8/include/v8.h

File diff suppressed because it is too large

4
deps/v8/include/v8config.h

@ -63,7 +63,6 @@
// V8_OS_FREEBSD - FreeBSD
// V8_OS_LINUX - Linux
// V8_OS_MACOSX - Mac OS X
// V8_OS_NACL - Native Client
// V8_OS_NETBSD - NetBSD
// V8_OS_OPENBSD - OpenBSD
// V8_OS_POSIX - POSIX compatible (mostly everything except Windows)
@ -80,9 +79,6 @@
# define V8_OS_BSD 1
# define V8_OS_MACOSX 1
# define V8_OS_POSIX 1
#elif defined(__native_client__)
# define V8_OS_NACL 1
# define V8_OS_POSIX 1
#elif defined(__CYGWIN__)
# define V8_OS_CYGWIN 1
# define V8_OS_POSIX 1

58
deps/v8/infra/config/cq.cfg

@ -25,17 +25,38 @@ verifiers {
try_job {
buckets {
name: "tryserver.v8"
name: "master.tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_linux64_asan_rel" }
builders { name: "v8_linux64_avx2_rel" }
builders { name: "v8_linux64_asan_rel_ng" }
builders {
name: "v8_linux64_asan_rel_ng_triggered"
triggered_by: "v8_linux64_asan_rel_ng"
}
builders { name: "v8_linux64_avx2_rel_ng" }
builders {
name: "v8_linux64_avx2_rel_ng_triggered"
triggered_by: "v8_linux64_avx2_rel_ng"
}
builders { name: "v8_linux64_gyp_rel_ng" }
builders {
name: "v8_linux64_gyp_rel_ng_triggered"
triggered_by: "v8_linux64_gyp_rel_ng"
}
builders { name: "v8_linux64_rel_ng" }
builders {
name: "v8_linux64_rel_ng_triggered"
triggered_by: "v8_linux64_rel_ng"
}
builders { name: "v8_linux_arm64_rel" }
builders { name: "v8_linux_arm_rel" }
builders { name: "v8_linux_arm64_rel_ng" }
builders {
name: "v8_linux_arm64_rel_ng_triggered"
triggered_by: "v8_linux_arm64_rel_ng"
}
builders { name: "v8_linux_arm_rel_ng" }
builders {
name: "v8_linux_arm_rel_ng_triggered"
triggered_by: "v8_linux_arm_rel_ng"
}
builders { name: "v8_linux_chromium_gn_rel" }
builders { name: "v8_linux_dbg_ng" }
builders {
@ -45,13 +66,21 @@ verifiers {
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel" }
builders { name: "v8_linux_nodcheck_rel_ng" }
builders {
name: "v8_linux_nodcheck_rel_ng_triggered"
triggered_by: "v8_linux_nodcheck_rel_ng"
}
builders { name: "v8_linux_rel_ng" }
builders {
name: "v8_linux_rel_ng_triggered"
triggered_by: "v8_linux_rel_ng"
}
builders { name: "v8_mac_rel" }
builders { name: "v8_mac_rel_ng" }
builders {
name: "v8_mac_rel_ng_triggered"
triggered_by: "v8_mac_rel_ng"
}
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel_ng" }
builders {
@ -59,7 +88,11 @@ verifiers {
triggered_by: "v8_win64_rel_ng"
}
builders { name: "v8_win_compile_dbg" }
builders { name: "v8_win_nosnap_shared_compile_rel" }
builders { name: "v8_win_nosnap_shared_rel_ng" }
builders {
name: "v8_win_nosnap_shared_rel_ng_triggered"
triggered_by: "v8_win_nosnap_shared_rel_ng"
}
builders { name: "v8_win_rel_ng" }
builders {
name: "v8_win_rel_ng_triggered"
@ -67,13 +100,20 @@ verifiers {
}
builders {
name: "v8_linux_blink_rel"
experiment_percentage: 20
experiment_percentage: 100
}
builders {
name: "v8_linux64_sanitizer_coverage_rel"
experiment_percentage: 100
}
}
buckets {
name: "master.tryserver.chromium.win"
builders {
name: "win_chromium_compile_dbg_ng"
experiment_percentage: 100
}
}
}
sign_cla {}

35
deps/v8/infra/mb/PRESUBMIT.py

@ -0,0 +1,35 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
def _CommonChecks(input_api, output_api):
results = []
# Validate the format of the mb_config.pyl file.
mb_script = input_api.os_path.join(input_api.PresubmitLocalPath(), '..',
'..', 'tools', 'mb', 'mb.py')
mb_config_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'mb_config.pyl')
cmd = [input_api.python_executable, mb_script, 'validate', '--config-file',
mb_config_path]
kwargs = {'cwd': input_api.PresubmitLocalPath()}
results.extend(input_api.RunTests([
input_api.Command(name='mb_validate',
cmd=cmd, kwargs=kwargs,
message=output_api.PresubmitError)]))
return results
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)

670
deps/v8/infra/mb/mb_config.pyl

@ -0,0 +1,670 @@
# Copyright 2016 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
# This is a map of buildbot master names -> buildbot builder names ->
# config names (where each config name is a key in the 'configs' dict,
# below). MB uses this dict to look up which config to use for a given bot.
# Bots are ordered by appearance on waterfall.
'masters': {
'developer_default': {
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
'x86.debug': 'default_debug_x86',
'x86.optdebug': 'default_optdebug_x86',
'x86.release': 'default_release_x86',
},
'client.dart.fyi': {
'v8-linux-release': 'gyp_release_x86_disassembler',
'v8-win-release': 'gyp_release_x86_disassembler',
'v8-mac-release': 'gyp_release_x86_disassembler',
},
'client.dynamorio': {
'linux-v8-dr': 'gyp_release_x64',
},
'client.v8': {
# Linux.
'V8 Linux - builder': 'gn_release_x86_gcmole',
'V8 Linux - debug builder': 'gn_debug_x86',
'V8 Linux - nosnap builder': 'gn_release_x86_no_snap',
'V8 Linux - nosnap debug builder': 'gn_debug_x86_no_snap',
'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
'V8 Linux - noi18n - debug': 'gyp_debug_x86_no_i18n',
# Linux64.
'V8 Linux64 - builder': 'gn_release_x64',
'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
'V8 Linux64 - gyp': 'gyp_release_x64',
# Windows.
'V8 Win32 - builder': 'gyp_release_x86_minimal_symbols',
'V8 Win32 - debug builder': 'gyp_debug_x86_minimal_symbols',
'V8 Win32 - nosnap - shared':
'gyp_release_x86_no_snap_shared_minimal_symbols',
'V8 Win64': 'gyp_release_x64_minimal_symbols',
'V8 Win64 - debug': 'gyp_debug_x64_minimal_symbols',
'V8 Win64 - clang': 'gyp_release_x64_clang',
# Mac.
'V8 Mac': 'gn_release_x86',
'V8 Mac - debug': 'gn_debug_x86',
'V8 Mac64': 'gn_release_x64',
'V8 Mac64 - debug': 'gn_debug_x64',
'V8 Mac GC Stress': 'gn_debug_x86',
'V8 Mac64 ASAN': 'gyp_release_x64_asan',
# Sanitizers.
'V8 Linux64 ASAN': 'gyp_release_x64_asan',
'V8 Linux64 TSAN': 'gn_release_x64_tsan',
'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
# Clusterfuzz.
'V8 Linux64 ASAN no inline - release builder':
'gyp_release_x64_asan_symbolized_edge_verify_heap',
'V8 Linux64 ASAN - debug builder': 'gyp_debug_x64_asan_edge',
'V8 Linux64 ASAN arm64 - debug builder':
'gyp_debug_simulate_arm64_asan_edge',
'V8 Linux ASAN arm - debug builder':
'gyp_debug_simulate_arm_asan_edge',
'V8 Linux ASAN mipsel - debug builder':
'gyp_debug_simulate_mipsel_asan_edge',
# Misc.
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
# FYI.
'V8 Linux - swarming staging': 'gn_release_x64',
# TODO(machenbach): Figure out if symbolized is still needed. The
# original config also specified -O1, which we dropped because chromium
# doesn't have it (anymore).
'V8 Linux64 - cfi': 'gyp_release_x64_cfi_symbolized',
'V8 Linux - vtunejit': 'gyp_debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'gyp_release_x64_gcc_coverage',
'V8 Linux - predictable': 'gyp_release_x86_predictable',
'V8 Linux - full debug': 'gyp_full_debug_x86',
'V8 Linux - interpreted regexp': 'gyp_release_x86_interpreted_regexp',
'V8 Random Deopt Fuzzer - debug': 'gyp_debug_x86',
},
'client.v8.ports': {
# Arm.
'V8 Arm - builder': 'gyp_release_arm',
'V8 Arm - debug builder': 'gyp_debug_arm',
'V8 Android Arm - builder': 'gyp_release_android_arm',
'V8 Linux - arm - sim': 'gyp_release_simulate_arm',
'V8 Linux - arm - sim - debug': 'gyp_debug_simulate_arm',
# Arm64.
'V8 Android Arm64 - builder': 'gyp_release_android_arm64',
'V8 Linux - arm64 - sim': 'gn_release_simulate_arm64',
'V8 Linux - arm64 - sim - debug': 'gn_debug_simulate_arm64',
'V8 Linux - arm64 - sim - nosnap - debug':
'gn_debug_simulate_arm64_no_snap',
'V8 Linux - arm64 - sim - gc stress': 'gn_debug_simulate_arm64',
# Mips.
'V8 Mips - builder': 'gyp_release_mips_no_snap_no_i18n',
'V8 Linux - mipsel - sim - builder': 'gyp_release_simulate_mipsel',
'V8 Linux - mips64el - sim - builder': 'gyp_release_simulate_mips64el',
# PPC.
'V8 Linux - ppc - sim': 'gyp_release_simulate_ppc',
'V8 Linux - ppc64 - sim': 'gyp_release_simulate_ppc64',
# S390.
'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
# X87.
'V8 Linux - x87 - nosnap - debug builder':
'gyp_debug_simulate_x87_no_snap',
},
'client.v8.branches': {
'V8 Linux - beta branch': 'gn_release_x86',
'V8 Linux - beta branch - debug': 'gn_debug_x86',
'V8 Linux - stable branch': 'gn_release_x86',
'V8 Linux - stable branch - debug': 'gn_debug_x86',
'V8 Linux64 - beta branch': 'gyp_release_x64',
'V8 Linux64 - beta branch - debug': 'gn_debug_x64',
'V8 Linux64 - stable branch': 'gn_release_x64',
'V8 Linux64 - stable branch - debug': 'gn_debug_x64',
'V8 arm - sim - beta branch': 'gyp_release_simulate_arm',
'V8 arm - sim - beta branch - debug': 'gyp_debug_simulate_arm',
'V8 arm - sim - stable branch': 'gyp_release_simulate_arm',
'V8 arm - sim - stable branch - debug': 'gyp_debug_simulate_arm',
'V8 mips64el - sim - beta branch': 'gyp_release_simulate_mips64el',
'V8 mips64el - sim - stable branch': 'gyp_release_simulate_mips64el',
'V8 mipsel - sim - beta branch': 'gyp_release_simulate_mipsel',
'V8 mipsel - sim - stable branch': 'gyp_release_simulate_mipsel',
'V8 ppc - sim - beta branch': 'gyp_release_simulate_ppc',
'V8 ppc - sim - stable branch': 'gyp_release_simulate_ppc',
'V8 ppc64 - sim - beta branch': 'gyp_release_simulate_ppc64',
'V8 ppc64 - sim - stable branch': 'gyp_release_simulate_ppc64',
'V8 s390 - sim - beta branch': 'gyp_release_simulate_s390',
'V8 s390 - sim - stable branch': 'gyp_release_simulate_s390',
'V8 s390x - sim - beta branch': 'gyp_release_simulate_s390x',
'V8 s390x - sim - stable branch': 'gyp_release_simulate_s390x',
},
'tryserver.v8': {
'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
'v8_linux_noi18n_rel_ng': 'gyp_release_x86_no_i18n_trybot',
'v8_linux_gc_stress_dbg': 'gyp_debug_x86_trybot',
'v8_linux_nosnap_rel': 'gn_release_x86_no_snap_trybot',
'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux64_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
'v8_linux64_asan_rel_ng': 'gyp_release_x64_asan_minimal_symbols',
'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_sanitizer_coverage_rel':
'gyp_release_x64_asan_minimal_symbols_coverage',
'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols',
'v8_win_dbg': 'gyp_debug_x86_trybot',
'v8_win_compile_dbg': 'gyp_debug_x86_trybot',
'v8_win_rel_ng': 'gyp_release_x86_trybot',
'v8_win_nosnap_shared_rel_ng':
'gyp_release_x86_no_snap_shared_minimal_symbols',
'v8_win64_dbg': 'gyp_debug_x64_minimal_symbols',
'v8_win64_rel_ng': 'gyp_release_x64_trybot',
'v8_mac_rel_ng': 'gn_release_x86_trybot',
'v8_mac_dbg': 'gn_debug_x86_trybot',
'v8_mac_gc_stress_dbg': 'gn_debug_x86_trybot',
'v8_mac64_rel': 'gn_release_x64_trybot',
'v8_mac64_dbg': 'gn_debug_x64_minimal_symbols',
'v8_mac64_asan_rel': 'gyp_release_x64_asan',
'v8_linux_arm_rel_ng': 'gyp_release_simulate_arm_trybot',
'v8_linux_arm_dbg': 'gyp_debug_simulate_arm',
'v8_linux_arm_armv8a_rel': 'gyp_release_simulate_arm_trybot',
'v8_linux_arm_armv8a_dbg': 'gyp_debug_simulate_arm',
'v8_linux_arm64_rel_ng': 'gn_release_simulate_arm64_trybot',
'v8_linux_arm64_dbg': 'gn_debug_simulate_arm64',
'v8_linux_arm64_gc_stress_dbg': 'gn_debug_simulate_arm64',
'v8_linux_mipsel_compile_rel': 'gyp_release_simulate_mipsel',
'v8_linux_mips64el_compile_rel': 'gyp_release_simulate_mips64el',
'v8_android_arm_compile_rel': 'gyp_release_android_arm',
},
},
# To ease readability, config values are ordered by:
# gyp/gn, release/debug, arch type, other values alphabetically.
'configs': {
# Developer default configs.
'default_debug_x64': [
'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x64': [
'gn', 'debug', 'x64', 'v8_enable_slow_dchecks'],
'default_release_x64': [
'gn', 'release', 'x64'],
'default_debug_x86': [
'gn', 'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x86': [
'gn', 'debug', 'x86', 'v8_enable_slow_dchecks'],
'default_release_x86': [
'gn', 'release', 'x86'],
# GN debug configs for simulators.
'gn_debug_simulate_arm64': [
'gn', 'debug_bot', 'simulate_arm64', 'swarming'],
'gn_debug_simulate_arm64_no_snap': [
'gn', 'debug_bot', 'simulate_arm64', 'swarming', 'v8_snapshot_none'],
# GN release configs for simulators.
'gn_release_simulate_arm64': [
'gn', 'release_bot', 'simulate_arm64', 'swarming'],
'gn_release_simulate_arm64_msan': [
'gn', 'release_bot', 'simulate_arm64', 'msan', 'swarming'],
'gn_release_simulate_arm64_msan_minimal_symbols': [
'gn', 'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols',
'swarming'],
'gn_release_simulate_arm64_trybot': [
'gn', 'release_trybot', 'simulate_arm64', 'swarming'],
# GN release configs for x64.
'gn_release_x64': [
'gn', 'release_bot', 'x64', 'swarming'],
'gn_release_x64_internal': [
'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
'gn_release_x64_trybot': [
'gn', 'release_trybot', 'x64', 'swarming'],
'gn_release_x64_tsan': [
'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
# GN debug configs for x64.
'gn_debug_x64': [
'gn', 'debug_bot', 'x64', 'swarming'],
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
'gn_debug_x64_minimal_symbols': [
'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_debug_x64_trybot': [
'gn', 'debug_trybot', 'x64', 'swarming'],
'gn_debug_x64_valgrind': [
'gn', 'debug_bot', 'x64', 'swarming', 'valgrind'],
# GN debug configs for x86.
'gn_debug_x86': [
'gn', 'debug_bot', 'x86', 'swarming'],
'gn_debug_x86_no_snap': [
'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_debug_x86_no_snap_trybot': [
'gn', 'debug_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_debug_x86_trybot': [
'gn', 'debug_trybot', 'x86', 'swarming'],
# GN release configs for x86.
'gn_release_x86': [
'gn', 'release_bot', 'x86', 'swarming'],
'gn_release_x86_gcc': [
'gn', 'release_bot', 'x86', 'gcc'],
'gn_release_x86_gcc_minimal_symbols': [
'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
'gn_release_x86_gcmole': [
'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_gcmole_trybot': [
'gn', 'release_trybot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_minimal_symbols': [
'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
'gn_release_x86_no_snap': [
'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_release_x86_no_snap_trybot': [
'gn', 'release_trybot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_release_x86_shared_verify_heap': [
'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
'gn_release_x86_trybot': [
'gn', 'release_trybot', 'x86', 'swarming'],
# Gyp debug configs for arm.
'gyp_debug_arm': [
'gyp', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
# Gyp debug configs for simulators.
'gyp_debug_simulate_arm': [
'gyp', 'debug_bot', 'simulate_arm', 'swarming'],
'gyp_debug_simulate_arm_asan_edge': [
'gyp', 'debug_bot', 'simulate_arm', 'asan', 'edge'],
'gyp_debug_simulate_arm64_asan_edge': [
'gyp', 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
'gyp_debug_simulate_mipsel_asan_edge': [
'gyp', 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
'gyp_debug_simulate_x87_no_snap': [
'gyp', 'debug_bot', 'simulate_x87', 'swarming', 'v8_snapshot_none'],
# Gyp debug configs for x64.
'gyp_debug_x64_asan_edge': [
'gyp', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
'gyp_debug_x64_minimal_symbols': [
'gyp', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
# Gyp debug configs for x86.
'gyp_debug_x86': [
'gyp', 'debug_bot', 'x86', 'swarming'],
'gyp_debug_x86_minimal_symbols': [
'gyp', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
'gyp_debug_x86_trybot': [
'gyp', 'debug_trybot', 'x86', 'swarming'],
'gyp_debug_x86_no_i18n': [
'gyp', 'debug_bot', 'x86', 'v8_no_i18n'],
'gyp_debug_x86_vtunejit': [
'gyp', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
'gyp_full_debug_x86': [
'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
'v8_full_debug'],
# Gyp release configs for arm.
'gyp_release_arm': [
'gyp', 'release_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
'gyp_release_android_arm': [
'gyp', 'release_bot', 'arm', 'android', 'crosscompile', 'swarming'],
'gyp_release_android_arm64': [
'gyp', 'release_bot', 'arm64', 'android', 'crosscompile', 'swarming'],
# Gyp release configs for mips.
'gyp_release_mips_no_snap_no_i18n': [
'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n',
'v8_snapshot_none'],
# Gyp release configs for simulators.
'gyp_release_simulate_arm': [
'gyp', 'release_bot', 'simulate_arm', 'swarming'],
'gyp_release_simulate_arm_trybot': [
'gyp', 'release_trybot', 'simulate_arm', 'swarming'],
'gyp_release_simulate_mipsel': [
'gyp', 'release_bot', 'simulate_mipsel', 'swarming'],
'gyp_release_simulate_mips64el': [
'gyp', 'release_bot', 'simulate_mips64el', 'swarming'],
'gyp_release_simulate_ppc': [
'gyp', 'release_bot', 'simulate_ppc', 'swarming'],
'gyp_release_simulate_ppc64': [
'gyp', 'release_bot', 'simulate_ppc64', 'swarming'],
'gyp_release_simulate_s390': [
'gyp', 'release_bot', 'simulate_s390', 'swarming'],
'gyp_release_simulate_s390x': [
'gyp', 'release_bot', 'simulate_s390x', 'swarming'],
# Gyp release configs for x64.
'gyp_release_x64': [
'gyp', 'release_bot', 'x64', 'swarming'],
'gyp_release_x64_asan': [
'gyp', 'release_bot', 'x64', 'asan', 'lsan', 'swarming'],
'gyp_release_x64_asan_minimal_symbols': [
'gyp', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols',
'swarming'],
'gyp_release_x64_asan_minimal_symbols_coverage': [
'gyp', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan',
'minimal_symbols', 'swarming'],
'gyp_release_x64_asan_symbolized_edge_verify_heap': [
'gyp', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
'v8_verify_heap'],
'gyp_release_x64_cfi_symbolized': [
'gyp', 'release_bot', 'x64', 'cfi', 'swarming', 'symbolized'],
'gyp_release_x64_clang': [
'gyp', 'release_bot', 'x64', 'clang', 'swarming'],
'gyp_release_x64_gcc_coverage': [
'gyp', 'release_bot', 'x64', 'coverage', 'gcc'],
'gyp_release_x64_minimal_symbols': [
'gyp', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
'gyp_release_x64_trybot': [
'gyp', 'release_trybot', 'x64', 'swarming'],
# Gyp release configs for x86.
'gyp_release_x86_disassembler': [
'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gyp_release_x86_interpreted_regexp': [
'gyp', 'release_bot', 'x86', 'v8_interpreted_regexp'],
'gyp_release_x86_minimal_symbols': [
'gyp', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
'gyp_release_x86_no_i18n_trybot': [
'gyp', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
'gyp_release_x86_no_snap_shared_minimal_symbols': [
'gyp', 'release', 'x86', 'goma', 'minimal_symbols', 'shared', 'swarming',
'v8_snapshot_none'],
'gyp_release_x86_predictable': [
'gyp', 'release_bot', 'x86', 'v8_enable_verify_predictable'],
'gyp_release_x86_trybot': [
'gyp', 'release_trybot', 'x86', 'swarming'],
},
'mixins': {
'android': {
'gn_args': 'target_os="android" v8_android_log_stdout=true',
'gyp_defines': 'OS=android v8_android_log_stdout=1',
},
'arm': {
'gn_args': 'target_cpu="arm"',
'gyp_defines': 'target_arch=arm',
},
'arm64': {
'gn_args': 'target_cpu="arm64"',
'gyp_defines': 'target_arch=arm64',
},
'asan': {
'gn_args': 'is_asan=true',
'gyp_defines': 'clang=1 asan=1',
},
'bb': {
'gn_args': 'sanitizer_coverage_flags="bb"',
'gyp_defines': 'sanitizer_coverage=bb',
},
'cfi': {
'gn_args': 'is_cfi=true use_cfi_diag=true',
'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
},
'clang': {
'gn_args': 'is_clang=true',
'gyp_defines': 'clang=1',
},
'coverage': {
# TODO(machenbach): Add this to gn.
'gyp_defines': 'coverage=1',
},
'crosscompile': {
'gyp_crosscompile': True,
},
'dcheck_always_on': {
'gn_args': 'dcheck_always_on=true',
'gyp_defines': 'dcheck_always_on=1',
},
'debug': {
'gn_args': 'is_debug=true v8_enable_backtrace=true',
'gyp_defines': 'v8_enable_backtrace=1',
},
'debug_bot': {
'mixins': [
'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
'v8_optimized_debug'],
},
'debug_trybot': {
'mixins': ['debug_bot', 'minimal_symbols'],
},
'edge': {
'gn_args': 'sanitizer_coverage_flags="edge"',
'gyp_defines': 'sanitizer_coverage=edge',
},
'gcc': {
'gn_args': 'is_clang=false use_sysroot=false',
'gyp_defines': 'clang=0',
},
'gcmole': {
'gn_args': 'v8_gcmole=true',
'gyp_defines': 'gcmole=1',
},
'gn': {'type': 'gn'},
'goma': {
# The MB code will properly escape goma_dir if necessary in the GYP
# code path; the GN code path needs no escaping.
'gn_args': 'use_goma=true',
'gyp_defines': 'use_goma=1',
},
'gyp': {'type': 'gyp'},
'hard_float': {
'gn_args': 'arm_float_abi="hard"',
'gyp_defines': 'arm_float_abi=hard',
},
'lsan': {
'gn_args': 'is_lsan=true',
'gyp_defines': 'lsan=1',
},
'minimal_symbols': {
'gn_args': 'symbol_level=1',
'gyp_defines': 'fastbuild=1',
},
'mips': {
'gn_args': 'target_cpu="mips"',
'gyp_defines': 'target_arch=mips',
},
'msan': {
'gn_args': ('is_msan=true msan_track_origins=2 '
'use_prebuilt_instrumented_libraries=true'),
'gyp_defines': ('clang=1 msan=1 msan_track_origins=2 '
'use_prebuilt_instrumented_libraries=1'),
},
'release': {
'gn_args': 'is_debug=false',
},
'release_bot': {
'mixins': ['release', 'static', 'goma'],
},
'release_trybot': {
'mixins': ['release_bot', 'minimal_symbols', 'dcheck_always_on'],
},
'shared': {
'gn_args': 'is_component_build=true',
'gyp_defines': 'component=shared_library',
},
'simulate_arm': {
'gn_args': 'target_cpu="x86" v8_target_cpu="arm"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=arm',
},
'simulate_arm64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
'gyp_defines': 'target_arch=x64 v8_target_arch=arm64',
},
'simulate_mipsel': {
'gn_args': 'target_cpu="x86" v8_target_cpu="mipsel"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=mipsel',
},
'simulate_mips64el': {
'gn_args': 'target_cpu="x64" v8_target_cpu="mips64el"',
'gyp_defines': 'target_arch=x64 v8_target_arch=mips64el',
},
'simulate_ppc': {
'gn_args': 'target_cpu="x86" v8_target_cpu="ppc"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=ppc',
},
'simulate_ppc64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="ppc64"',
'gyp_defines': 'target_arch=x64 v8_target_arch=ppc64',
},
'simulate_s390': {
'gn_args': 'target_cpu="x86" v8_target_cpu="s390"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=s390',
},
'simulate_s390x': {
'gn_args': 'target_cpu="x64" v8_target_cpu="s390x"',
'gyp_defines': 'target_arch=x64 v8_target_arch=s390x',
},
'simulate_x87': {
'gn_args': 'target_cpu="x86" v8_target_cpu="x87"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=x87',
},
'static': {
'gn_args': 'is_component_build=false',
'gyp_defines': 'component=static_library',
},
'swarming': {
'gn_args': 'v8_test_isolation_mode="prepare"',
'gyp_defines': 'test_isolation_mode=prepare',
},
# TODO(machenbach): Remove the symbolized config after the bots are gone.
'symbolized': {
'gn_args': 'symbolized=true',
'gyp_defines':
'release_extra_cflags="-fno-inline-functions -fno-inline"',
},
'tsan': {
'gn_args': 'is_tsan=true',
'gyp_defines': 'clang=1 tsan=1',
},
'valgrind': {
'gn_args': 'v8_has_valgrind=true',
'gyp_defines': 'has_valgrind=1',
},
'v8_no_i18n': {
'gn_args': 'v8_enable_i18n_support=false',
'gyp_defines': 'v8_enable_i18n_support=0',
},
'v8_enable_disassembler': {
'gn_args': 'v8_enable_disassembler=true',
'gyp_defines': 'v8_enable_disassembler=1',
},
'v8_enable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=true',
'gyp_defines': 'v8_enable_slow_dchecks=1',
},
'v8_enable_verify_predictable': {
'gn_args': 'v8_enable_verify_predictable=true',
'gyp_defines': 'v8_enable_verify_predictable=1',
},
'v8_enable_vtunejit': {
'gn_args': 'v8_enable_vtunejit=true',
'gyp_defines': 'v8_enable_vtunejit=1',
},
'v8_full_debug': {
'gn_args': 'v8_optimized_debug=false',
'gyp_defines': 'v8_optimized_debug=0',
},
'v8_interpreted_regexp': {
'gn_args': 'v8_interpreted_regexp=true',
'gyp_defines': 'v8_interpreted_regexp=1',
},
'v8_optimized_debug': {
# This is the default in gn for debug.
'gyp_defines': 'v8_optimized_debug=1',
},
'v8_snapshot_custom': {
# GN path is relative to project root.
'gn_args': 'v8_embed_script="test/mjsunit/mjsunit.js"',
# Gyp path is relative to src/v8.gyp.
'gyp_defines': 'embed_script=../test/mjsunit/mjsunit.js',
},
'v8_snapshot_internal': {
'gn_args': 'v8_use_external_startup_data=false',
'gyp_defines': 'v8_use_external_startup_data=0',
},
'v8_snapshot_none': {
'gn_args': 'v8_use_snapshot=false',
'gyp_defines': 'v8_use_snapshot=false',
},
'v8_verify_heap': {
'gn_args': 'v8_enable_verify_heap=true',
'gyp_defines': 'v8_enable_verify_heap=1',
},
'x64': {
'gn_args': 'target_cpu="x64"',
'gyp_defines': 'target_arch=x64',
},
'x86': {
'gn_args': 'target_cpu="x86"',
'gyp_defines': 'target_arch=ia32',
},
},
}
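
The 'masters' -> builder -> config -> mixins indirection above is easiest to follow with a worked expansion. MB itself is implemented in Python (tools/mb/mb.py, the script the PRESUBMIT check earlier in this change uses to validate this file); the standalone C++ sketch below is an illustration only of how a config name such as 'gn_release_x64' flattens through nested mixins into a single gn_args string, using a few mixin entries copied from the file above.

// Illustration only: MB is a Python tool, this is not its implementation.
// It shows how a config ('gn_release_x64' = ['gn', 'release_bot', 'x64',
// 'swarming']) expands through nested mixins into flat GN arguments.
#include <iostream>
#include <map>
#include <string>
#include <vector>

// A tiny excerpt of the 'mixins' section above; 'release_bot' pulls in
// three further mixins, the others contribute GN arguments directly.
struct Mixin {
  std::vector<std::string> mixins;
  std::string gn_args;
};

const std::map<std::string, Mixin> kMixins = {
    {"gn", {{}, ""}},
    {"release", {{}, "is_debug=false"}},
    {"static", {{}, "is_component_build=false"}},
    {"goma", {{}, "use_goma=true"}},
    {"release_bot", {{"release", "static", "goma"}, ""}},
    {"x64", {{}, "target_cpu=\"x64\""}},
    {"swarming", {{}, "v8_test_isolation_mode=\"prepare\""}},
};

// Depth-first expansion: a mixin's sub-mixins are flattened before its own
// directly contributed arguments are appended.
void Expand(const std::string& name, std::string* out) {
  const Mixin& m = kMixins.at(name);
  for (const std::string& sub : m.mixins) Expand(sub, out);
  if (!m.gn_args.empty()) {
    if (!out->empty()) *out += ' ';
    *out += m.gn_args;
  }
}

int main() {
  std::string gn_args;
  for (const char* name : {"gn", "release_bot", "x64", "swarming"}) {
    Expand(name, &gn_args);
  }
  std::cout << gn_args << "\n";
  // Prints:
  //   is_debug=false is_component_build=false use_goma=true
  //   target_cpu="x64" v8_test_isolation_mode="prepare"
  return 0;
}
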

18
deps/v8/samples/hello-world.cc

@ -11,29 +11,18 @@
using namespace v8;
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
int main(int argc, char* argv[]) {
// Initialize V8.
V8::InitializeICU();
V8::InitializeICUDefaultLocation(argv[0]);
V8::InitializeExternalStartupData(argv[0]);
Platform* platform = platform::CreateDefaultPlatform();
V8::InitializePlatform(platform);
V8::Initialize();
// Create a new Isolate and make it the current one.
ArrayBufferAllocator allocator;
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &allocator;
create_params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope isolate_scope(isolate);
@ -68,5 +57,6 @@ int main(int argc, char* argv[]) {
V8::Dispose();
V8::ShutdownPlatform();
delete platform;
delete create_params.array_buffer_allocator;
return 0;
}
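
For reference, the pattern the updated samples converge on, assembled as a minimal standalone sketch (context creation and script execution omitted): ICU and external startup data are located from argv[0], the isolate takes a heap-allocated default allocator from v8::ArrayBuffer::Allocator::NewDefaultAllocator(), and the embedder deletes that allocator after shutdown.

// Minimal sketch of the setup/teardown pattern in the updated samples.
// The per-sample malloc/memset allocator classes are gone: the isolate is
// handed a default allocator instead, which the embedder owns and frees.
#include "include/libplatform/libplatform.h"
#include "include/v8.h"

int main(int argc, char* argv[]) {
  v8::V8::InitializeICUDefaultLocation(argv[0]);
  v8::V8::InitializeExternalStartupData(argv[0]);
  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
  v8::V8::InitializePlatform(platform);
  v8::V8::Initialize();

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    // ... create a context, compile and run scripts here ...
  }

  // Teardown now also frees the allocator created above.
  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete platform;
  delete create_params.array_buffer_allocator;
  return 0;
}
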

17
deps/v8/samples/process.cc

@ -38,17 +38,6 @@
using namespace std;
using namespace v8;
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
// These interfaces represent an existing request processing interface.
// The idea is to imagine a real application that uses these interfaces
// and then add scripting capabilities that allow you to interact with
@ -687,7 +676,7 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
@ -699,9 +688,9 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "No script was specified.\n");
return 1;
}
ArrayBufferAllocator array_buffer_allocator;
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &array_buffer_allocator;
create_params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
Isolate* isolate = Isolate::New(create_params);
Isolate::Scope isolate_scope(isolate);
HandleScope scope(isolate);

14
deps/v8/samples/samples.gyp

@ -29,13 +29,14 @@
'variables': {
'v8_code': 1,
'v8_enable_i18n_support%': 1,
'v8_toolset_for_shell%': 'target',
},
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'target_defaults': {
'type': 'executable',
'dependencies': [
'../tools/gyp/v8.gyp:v8',
'../tools/gyp/v8.gyp:v8_libplatform',
'../src/v8.gyp:v8',
'../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'..',
@ -56,10 +57,15 @@
},
'targets': [
{
'target_name': 'shell',
'target_name': 'v8_shell',
'sources': [
'shell.cc',
],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ '<(v8_toolset_for_shell)', ],
}],
],
},
{
'target_name': 'hello-world',

18
deps/v8/samples/shell.cc

@ -63,27 +63,16 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
static bool run_shell;
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
};
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
ShellArrayBufferAllocator array_buffer_allocator;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = &array_buffer_allocator;
create_params.array_buffer_allocator =
v8::ArrayBuffer::Allocator::NewDefaultAllocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
run_shell = (argc == 1);
int result;
@ -103,6 +92,7 @@ int main(int argc, char* argv[]) {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
delete create_params.array_buffer_allocator;
return result;
}

81
deps/v8/snapshot_toolchain.gni

@ -25,22 +25,73 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The snapshot needs to be compiled for the host, but compiled with
# a toolchain that matches the bit-width of the target.
import("//build/config/v8_target_cpu.gni")
# TODO(GYP): For now we only support 32-bit little-endian target builds from an
# x64 Linux host. Eventually we need to support all of the host/target
# configurations v8 runs on.
if (host_cpu == "x64" && host_os == "linux") {
if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
snapshot_toolchain = "//build/toolchain/linux:clang_x86"
} else if (target_cpu == "x64" || target_cpu == "arm64" || target_cpu == "mips64el") {
snapshot_toolchain = "//build/toolchain/linux:clang_x64"
} else {
assert(false, "Need environment for this arch: $target_cpu")
}
} else {
snapshot_toolchain = default_toolchain
declare_args() {
# The v8 snapshot needs to be built by code that is compiled with a
# toolchain that matches the bit-width of the target CPU, but runs on
# the host.
v8_snapshot_toolchain = ""
}
# Try to infer the appropriate snapshot toolchain for the v8_current_cpu
# where possible.
#
# Assume that v8_target_cpu (and hence v8_current_cpu) has been validated
# as supported on the current host CPU and OS in v8_target_cpu.gni. The
# logic below is complicated enough without also needing to do input
# validation.
#
# There are test cases for this code posted as an attachment to
# https://crbug.com/625353.
#
# TODO(GYP): Currently only regular (non-cross) compiles, and cross-compiles
# from x64 hosts to Intel, ARM, or MIPS targets, are implemented. Add support
# for the other supported configurations.
if (v8_snapshot_toolchain == "") {
if (current_os == host_os && current_cpu == host_cpu) {
# This is not a cross-compile, so build the snapshot with the current
# toolchain.
v8_snapshot_toolchain = current_toolchain
} else if (current_os == host_os && current_cpu == "x86" &&
host_cpu == "x64") {
# This is an x64 -> x86 cross-compile, but x64 hosts can usually run x86
# binaries built for the same OS, so build the snapshot with the current
# toolchain here, too.
v8_snapshot_toolchain = current_toolchain
} else if (current_os == "win" && host_os == "mac" && is_clang) {
# This is a mac -> win cross-compile, which is only supported w/ clang.
v8_snapshot_toolchain = "//build/toolchain/mac:clang_${v8_current_cpu}"
} else if (host_cpu == "x64") {
# This is a cross-compile from an x64 host to either a non-Intel target
# cpu or a different target OS. Clang will always be used by default on the
# host, unless this is a ChromeOS build, in which case the same toolchain
# (Clang or GCC) will be used for target and host by default.
if (is_chromeos && !is_clang) {
_clang = ""
} else {
_clang = "clang_"
}
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
_cpus = "x64_v8_${v8_current_cpu}"
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
_cpus = "x86_v8_${v8_current_cpu}"
} else {
# This branch should not be reached; leave _cpus blank so the assert
# below will fail.
_cpus = ""
}
if (_cpus != "") {
v8_snapshot_toolchain = "//build/toolchain/${host_os}:${_clang}${_cpus}"
}
}
}
assert(v8_snapshot_toolchain != "",
"Do not know how to build a snapshot for $current_toolchain " +
"on $host_os $host_cpu")

8
deps/v8/src/DEPS

@ -3,16 +3,19 @@ include_rules = [
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-stub-assembler.h",
"+src/compiler/code-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"-src/interpreter",
"+src/interpreter/bytecode-array-iterator.h",
"+src/interpreter/bytecode-decoder.h",
"+src/interpreter/bytecode-flags.h",
"+src/interpreter/bytecode-register.h",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
"+src/interpreter/source-position-table.h",
"+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
"-include/libplatform"
]
@ -20,5 +23,6 @@ include_rules = [
specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
"+include/libplatform/v8-tracing.h",
],
}

420
deps/v8/src/accessors.cc

@ -40,6 +40,11 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
Handle<Object> set = v8::FromCData(isolate, setter);
info->set_getter(*get);
info->set_setter(*set);
Address redirected = info->redirected_getter();
if (redirected != nullptr) {
Handle<Object> js_get = v8::FromCData(isolate, redirected);
info->set_js_getter(*js_get);
}
return info;
}
@ -67,9 +72,6 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
return
CheckForName(name, isolate->factory()->length_string(),
JSArray::kLengthOffset, object_offset);
case JS_ARRAY_BUFFER_TYPE:
return CheckForName(name, isolate->factory()->byte_length_string(),
JSArrayBuffer::kByteLengthOffset, object_offset);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(name, isolate->factory()->length_string(),
@ -81,57 +83,11 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
}
bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Handle<Name> name,
int* object_offset) {
DCHECK(name->IsUniqueName());
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
case JS_TYPED_ARRAY_TYPE: {
if (!CheckForName(name, isolate->factory()->length_string(),
JSTypedArray::kLengthOffset, object_offset) &&
!CheckForName(name, isolate->factory()->byte_length_string(),
JSTypedArray::kByteLengthOffset, object_offset) &&
!CheckForName(name, isolate->factory()->byte_offset_string(),
JSTypedArray::kByteOffsetOffset, object_offset)) {
return false;
}
if (map->is_dictionary_map()) return false;
// Check if the property is overridden on the instance.
DescriptorArray* descriptors = map->instance_descriptors();
int descriptor = descriptors->SearchWithCache(isolate, *name, *map);
if (descriptor != DescriptorArray::kNotFound) return false;
Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
if (!proto->IsJSReceiver()) return false;
// Check if the property is defined in the prototype chain.
LookupIterator it(proto, name);
if (!it.IsFound()) return false;
Object* original_proto =
JSFunction::cast(map->GetConstructor())->prototype();
// Property is not configurable. It is enough to verify that
// the holder is the same.
return *it.GetHolder<Object>() == original_proto;
}
case JS_DATA_VIEW_TYPE:
return CheckForName(name, isolate->factory()->byte_length_string(),
JSDataView::kByteLengthOffset, object_offset) ||
CheckForName(name, isolate->factory()->byte_offset_string(),
JSDataView::kByteOffsetOffset, object_offset);
default:
return false;
}
}
namespace {
MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
Isolate* isolate, Handle<JSObject> receiver, Handle<JSObject> holder,
Handle<Name> name, Handle<Object> value, bool observe) {
MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
Isolate* isolate, Handle<Object> receiver, Handle<JSObject> holder,
Handle<Name> name, Handle<Object> value) {
LookupIterator it(receiver, name, holder,
LookupIterator::OWN_SKIP_INTERCEPTOR);
// Skip any access checks we might hit. This accessor should never hit in a
@ -140,37 +96,26 @@ MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
CHECK(it.HasAccess());
it.Next();
}
DCHECK(holder.is_identical_to(it.GetHolder<JSObject>()));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
Handle<Object> old_value;
bool is_observed = observe && receiver->map()->is_observed();
if (is_observed) {
MaybeHandle<Object> maybe_old = Object::GetPropertyWithAccessor(&it);
if (!maybe_old.ToHandle(&old_value)) return maybe_old;
}
it.ReconfigureDataProperty(value, it.property_attributes());
if (is_observed && !old_value->SameValue(*value)) {
return JSObject::EnqueueChangeRecord(receiver, "update", name, old_value);
}
return value;
}
} // namespace
void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> receiver =
Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
isolate, receiver, holder, name, value, false);
MaybeHandle<Object> result =
ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, value);
if (result.is_null()) isolate->OptionalRescheduleException(false);
}
@ -221,7 +166,7 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSReceiver> object = Utils::OpenHandle(*info.This());
Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
@ -231,9 +176,7 @@ void Accessors::ArrayLengthSetter(
return;
}
if (JSArray::ObservableSetLength(array, length).is_null()) {
isolate->OptionalRescheduleException(false);
}
JSArray::SetLength(array, length);
if (info.ShouldThrowOnError()) {
uint32_t actual_new_len = 0;
@ -305,7 +248,7 @@ void Accessors::ScriptColumnOffsetGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Smi::FromInt(
Script::cast(JSValue::cast(object)->value())->column_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@ -332,7 +275,7 @@ void Accessors::ScriptIdGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* id = Smi::FromInt(Script::cast(JSValue::cast(object)->value())->id());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
@ -357,7 +300,7 @@ void Accessors::ScriptNameGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* source = Script::cast(JSValue::cast(object)->value())->name();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
@ -381,7 +324,7 @@ void Accessors::ScriptSourceGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* source = Script::cast(JSValue::cast(object)->value())->source();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
@ -405,7 +348,7 @@ void Accessors::ScriptLineOffsetGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res =
Smi::FromInt(Script::cast(JSValue::cast(object)->value())->line_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@ -432,7 +375,7 @@ void Accessors::ScriptTypeGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res =
Smi::FromInt(Script::cast(JSValue::cast(object)->value())->type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@ -458,7 +401,7 @@ void Accessors::ScriptCompilationTypeGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Smi::FromInt(
Script::cast(JSValue::cast(object)->value())->compilation_type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
@ -484,7 +427,7 @@ void Accessors::ScriptLineEndsGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.This());
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Script::InitLineEnds(script);
@ -519,7 +462,7 @@ void Accessors::ScriptSourceUrlGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
@ -543,7 +486,7 @@ void Accessors::ScriptSourceMappingUrlGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* url =
Script::cast(JSValue::cast(object)->value())->source_mapping_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
@ -567,7 +510,7 @@ void Accessors::ScriptIsEmbedderDebugScriptGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
->origin_options()
.IsEmbedderDebugScript();
@ -596,7 +539,7 @@ void Accessors::ScriptContextDataGetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Script::cast(JSValue::cast(object)->value())->context_data();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@ -621,11 +564,11 @@ void Accessors::ScriptEvalFromScriptGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.This());
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (!script->eval_from_shared()->IsUndefined()) {
if (!script->eval_from_shared()->IsUndefined(isolate)) {
Handle<SharedFunctionInfo> eval_from_shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
if (eval_from_shared->script()->IsScript()) {
@ -657,16 +600,12 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.This());
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
Handle<Code> code(SharedFunctionInfo::cast(
script->eval_from_shared())->code());
result = Handle<Object>(Smi::FromInt(code->SourcePosition(
script->eval_from_instructions_offset())),
isolate);
result = Handle<Object>(Smi::FromInt(script->GetEvalPosition()), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -691,17 +630,19 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.This());
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result;
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
// Find the name of the function calling eval.
if (!shared->name()->IsUndefined()) {
result = Handle<Object>(shared->name(), isolate);
} else {
result = Handle<Object>(shared->inferred_name(), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (!script->eval_from_shared()->IsUndefined(isolate)) {
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
// Find the name of the function calling eval.
if (!shared->name()->IsUndefined(isolate)) {
result = Handle<Object>(shared->name(), isolate);
} else {
result = Handle<Object>(shared->inferred_name(), isolate);
}
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@ -732,24 +673,8 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
MUST_USE_RESULT static MaybeHandle<Object> SetFunctionPrototype(
Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
Handle<Object> old_value;
bool is_observed = function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
else
old_value = isolate->factory()->NewFunctionPrototype(function);
}
JSFunction::SetPrototype(function, value);
DCHECK(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
MaybeHandle<Object> result = JSObject::EnqueueChangeRecord(
function, "update", isolate->factory()->prototype_string(), old_value);
if (result.is_null()) return MaybeHandle<Object>();
}
return function;
}
@ -811,45 +736,19 @@ void Accessors::FunctionLengthGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
int length = 0;
if (function->shared()->is_compiled()) {
length = function->shared()->length();
} else {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
length = function->shared()->length();
}
if (isolate->has_pending_exception()) {
isolate->OptionalRescheduleException(false);
}
Handle<Object> result;
if (!JSFunction::GetLength(isolate, function).ToHandle(&result)) {
result = handle(Smi::FromInt(0), isolate);
isolate->OptionalRescheduleException(false);
}
Handle<Object> result(Smi::FromInt(length), isolate);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
void Accessors::ObservedReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> receiver =
Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
Handle<JSObject> holder =
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
isolate, receiver, holder, name, value, true);
if (result.is_null()) isolate->OptionalRescheduleException(false);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
&FunctionLengthGetter, &ObservedReconfigureToDataProperty,
&FunctionLengthGetter, &ReconfigureToDataProperty,
attributes);
}
@ -866,19 +765,14 @@ void Accessors::FunctionNameGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
if (function->shared()->name_should_print_as_anonymous()) {
result = isolate->factory()->anonymous_string();
} else {
result = handle(function->shared()->name(), isolate);
}
Handle<Object> result = JSFunction::GetName(isolate, function);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
&FunctionNameGetter, &ObservedReconfigureToDataProperty,
&FunctionNameGetter, &ReconfigureToDataProperty,
attributes);
}
@ -977,7 +871,16 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
// Copy the parameters to the arguments object.
DCHECK(array->length() == length);
for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
for (int i = 0; i < length; i++) {
Object* value = frame->GetParameter(i);
if (value->IsTheHole(isolate)) {
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
DCHECK(IsResumableFunction(function->shared()->kind()));
value = isolate->heap()->undefined_value();
}
array->set(i, value);
}
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
@ -1155,62 +1058,179 @@ Handle<AccessorInfo> Accessors::FunctionCallerInfo(
//
// Accessors::MakeModuleExport
// Accessors::BoundFunctionLength
//
static void ModuleGetExport(v8::Local<v8::Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
Isolate* isolate = instance->GetIsolate();
int slot = info.Data()
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
Handle<Name> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Smi> target_length;
Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
isolate);
if (!JSFunction::GetLength(isolate, target).ToHandle(&target_length)) {
target_length = handle(Smi::FromInt(0), isolate);
isolate->OptionalRescheduleException(false);
return;
}
Object* value = context->get(slot);
if (value->IsTheHole()) {
Handle<Name> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
int bound_length = function->bound_arguments()->length();
int length = Max(0, target_length->value() - bound_length);
Handle<Object> result(Smi::FromInt(length), isolate);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
&BoundFunctionLengthGetter, &ReconfigureToDataProperty,
attributes);
}
//
// Accessors::BoundFunctionName
//
void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
if (!JSBoundFunction::GetName(isolate, function).ToHandle(&result)) {
isolate->OptionalRescheduleException(false);
return;
}
info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::BoundFunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
&BoundFunctionNameGetter, &ReconfigureToDataProperty,
attributes);
}
//
// Accessors::ErrorStack
//
static void ModuleSetExport(v8::Local<v8::Name> property,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
if (!info.ShouldThrowOnError()) return;
Handle<Name> name = v8::Utils::OpenHandle(*property);
Isolate* isolate = name->GetIsolate();
Handle<Object> exception =
isolate->factory()->NewTypeError(MessageTemplate::kNotDefined, name);
isolate->ScheduleThrow(*exception);
namespace {
MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
Handle<JSObject> error) {
RETURN_ON_EXCEPTION(
isolate,
JSReceiver::SetProperty(error, isolate->factory()->stack_trace_symbol(),
isolate->factory()->undefined_value(), STRICT),
JSReceiver);
return error;
}
bool IsAccessor(Handle<Object> receiver, Handle<Name> name,
Handle<JSObject> holder) {
LookupIterator it(receiver, name, holder,
LookupIterator::OWN_SKIP_INTERCEPTOR);
// Skip any access checks we might hit. This accessor should never hit in a
// situation where the caller does not have access.
if (it.state() == LookupIterator::ACCESS_CHECK) {
CHECK(it.HasAccess());
it.Next();
}
return (it.state() == LookupIterator::ACCESSOR);
}
Handle<AccessorInfo> Accessors::MakeModuleExport(
Handle<String> name,
int index,
PropertyAttributes attributes) {
Isolate* isolate = name->GetIsolate();
Handle<AccessorInfo> info = MakeAccessor(isolate, name, &ModuleGetExport,
&ModuleSetExport, attributes);
info->set_data(Smi::FromInt(index));
return info;
} // namespace
void Accessors::ErrorStackGetter(
v8::Local<v8::Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> holder =
Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
// Retrieve the structured stack trace.
Handle<Object> stack_trace;
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
MaybeHandle<Object> maybe_stack_trace =
JSObject::GetProperty(holder, stack_trace_symbol);
if (!maybe_stack_trace.ToHandle(&stack_trace) ||
stack_trace->IsUndefined(isolate)) {
Handle<Object> result = isolate->factory()->undefined_value();
info.GetReturnValue().Set(Utils::ToLocal(result));
return;
}
// Format it, clear the internal structured trace and reconfigure as a data
// property.
Handle<Object> formatted_stack_trace;
if (!ErrorUtils::FormatStackTrace(isolate, holder, stack_trace)
.ToHandle(&formatted_stack_trace)) {
isolate->OptionalRescheduleException(false);
return;
}
MaybeHandle<Object> result = ClearInternalStackTrace(isolate, holder);
if (result.is_null()) {
isolate->OptionalRescheduleException(false);
return;
}
// If stack is still an accessor (this could have changed in the meantime
// since FormatStackTrace can execute arbitrary JS), replace it with a data
// property.
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<Name> name = Utils::OpenHandle(*key);
if (IsAccessor(receiver, name, holder)) {
result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name,
formatted_stack_trace);
if (result.is_null()) {
isolate->OptionalRescheduleException(false);
return;
}
} else {
// The stack property has been modified in the meantime.
if (!JSObject::GetProperty(holder, name).ToHandle(&formatted_stack_trace)) {
isolate->OptionalRescheduleException(false);
return;
}
}
v8::Local<v8::Value> value = Utils::ToLocal(formatted_stack_trace);
info.GetReturnValue().Set(value);
}
void Accessors::ErrorStackSetter(v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> obj =
Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
// Clear internal properties to avoid memory leaks.
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
if (JSReceiver::HasOwnProperty(obj, stack_trace_symbol).FromMaybe(false)) {
ClearInternalStackTrace(isolate, obj);
}
Accessors::ReconfigureToDataProperty(name, val, info);
}
Handle<AccessorInfo> Accessors::ErrorStackInfo(Isolate* isolate,
PropertyAttributes attributes) {
Handle<AccessorInfo> info =
MakeAccessor(isolate, isolate->factory()->stack_string(),
&ErrorStackGetter, &ErrorStackSetter, attributes);
return info;
}
} // namespace internal
} // namespace v8
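
The new BoundFunctionLengthGetter reports Max(0, target length - number of bound arguments), matching the JavaScript-visible 'length' of a bound function. A tiny sketch of just that arithmetic, as a hypothetical standalone helper rather than V8 code:

// Hypothetical illustration of the arithmetic in BoundFunctionLengthGetter:
// a bound function's 'length' is the target function's length minus the
// number of pre-bound arguments, clamped at zero.
#include <algorithm>
#include <cassert>

int BoundFunctionLength(int target_length, int bound_argument_count) {
  return std::max(0, target_length - bound_argument_count);
}

int main() {
  // function f(a, b, c) {}  -> f.length == 3
  assert(BoundFunctionLength(3, 1) == 2);  // f.bind(null, 1).length
  assert(BoundFunctionLength(3, 5) == 0);  // never negative
  return 0;
}
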

23
deps/v8/src/accessors.h

@ -22,6 +22,9 @@ class AccessorInfo;
#define ACCESSOR_INFO_LIST(V) \
V(ArgumentsIterator) \
V(ArrayLength) \
V(BoundFunctionLength) \
V(BoundFunctionName) \
V(ErrorStack) \
V(FunctionArguments) \
V(FunctionCaller) \
V(FunctionName) \
@ -44,10 +47,10 @@ class AccessorInfo;
V(ScriptIsEmbedderDebugScript) \
V(StringLength)
#define ACCESSOR_SETTER_LIST(V) \
V(ReconfigureToDataProperty) \
V(ObservedReconfigureToDataProperty) \
V(ArrayLengthSetter) \
#define ACCESSOR_SETTER_LIST(V) \
V(ReconfigureToDataProperty) \
V(ArrayLengthSetter) \
V(ErrorStackSetter) \
V(FunctionPrototypeSetter)
// Accessors contains all predefined proxy accessors.
@ -85,23 +88,11 @@ class Accessors : public AllStatic {
Handle<JSFunction> object, Handle<Object> value);
static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
Handle<String> name, int index, PropertyAttributes attributes);
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset);
// Returns true for properties that are accessors to ArrayBufferView and
// derived classes fields. If true, *object_offset contains offset of
// object field. The caller still has to check whether the underlying
// buffer was neutered.
static bool IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Handle<Name> name,
int* object_offset);
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate,
Handle<Name> name,

4
deps/v8/src/address-map.cc

@ -13,7 +13,7 @@ namespace internal {
RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != NULL) return;
map_ = new HashMap(HashMap::PointersMatch);
map_ = new base::HashMap(base::HashMap::PointersMatch);
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
Object* root = isolate->heap()->root(root_index);
@ -22,7 +22,7 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
// not be referenced through the root list in the snapshot.
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
base::HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
if (entry != NULL) {
// Some are initialized to a previous value in the root list.
DCHECK_LT(GetValue(entry), i);

181
deps/v8/src/address-map.h

@ -6,7 +6,7 @@
#define V8_ADDRESS_MAP_H_
#include "src/assert-scope.h"
#include "src/hashmap.h"
#include "src/base/hashmap.h"
#include "src/objects.h"
namespace v8 {
@ -14,16 +14,17 @@ namespace internal {
class AddressMapBase {
protected:
static void SetValue(HashMap::Entry* entry, uint32_t v) {
static void SetValue(base::HashMap::Entry* entry, uint32_t v) {
entry->value = reinterpret_cast<void*>(v);
}
static uint32_t GetValue(HashMap::Entry* entry) {
static uint32_t GetValue(base::HashMap::Entry* entry) {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
}
inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
bool insert) {
inline static base::HashMap::Entry* LookupEntry(base::HashMap* map,
HeapObject* obj,
bool insert) {
if (insert) {
map->LookupOrInsert(Key(obj), Hash(obj));
}
@ -40,7 +41,6 @@ class AddressMapBase {
}
};
class RootIndexMap : public AddressMapBase {
public:
explicit RootIndexMap(Isolate* isolate);
@ -48,134 +48,175 @@ class RootIndexMap : public AddressMapBase {
static const int kInvalidRootIndex = -1;
int Lookup(HeapObject* obj) {
HashMap::Entry* entry = LookupEntry(map_, obj, false);
base::HashMap::Entry* entry = LookupEntry(map_, obj, false);
if (entry) return GetValue(entry);
return kInvalidRootIndex;
}
private:
HashMap* map_;
base::HashMap* map_;
DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};
class BackReference {
class SerializerReference {
public:
explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
SerializerReference() : bitfield_(Special(kInvalidValue)) {}
BackReference() : bitfield_(kInvalidValue) {}
static SerializerReference FromBitfield(uint32_t bitfield) {
return SerializerReference(bitfield);
}
static BackReference SourceReference() { return BackReference(kSourceValue); }
static SerializerReference BackReference(AllocationSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK(IsAligned(chunk_offset, kObjectAlignment));
DCHECK_NE(LO_SPACE, space);
return SerializerReference(
SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
}
static BackReference GlobalProxyReference() {
return BackReference(kGlobalProxyValue);
static SerializerReference MapReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(MAP_SPACE) |
ValueIndexBits::encode(index));
}
static BackReference LargeObjectReference(uint32_t index) {
return BackReference(SpaceBits::encode(LO_SPACE) |
ChunkOffsetBits::encode(index));
static SerializerReference LargeObjectReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(LO_SPACE) |
ValueIndexBits::encode(index));
}
static BackReference DummyReference() { return BackReference(kDummyValue); }
static SerializerReference AttachedReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(kAttachedReferenceSpace) |
ValueIndexBits::encode(index));
}
static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK(IsAligned(chunk_offset, kObjectAlignment));
DCHECK_NE(LO_SPACE, space);
return BackReference(
SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
static SerializerReference DummyReference() {
return SerializerReference(Special(kDummyValue));
}
bool is_valid() const { return bitfield_ != kInvalidValue; }
bool is_source() const { return bitfield_ == kSourceValue; }
bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
bool is_valid() const { return bitfield_ != Special(kInvalidValue); }
bool is_back_reference() const {
return SpaceBits::decode(bitfield_) <= LAST_SPACE;
}
AllocationSpace space() const {
DCHECK(is_valid());
return SpaceBits::decode(bitfield_);
DCHECK(is_back_reference());
return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
}
uint32_t chunk_offset() const {
DCHECK(is_valid());
DCHECK(is_back_reference());
return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
}
uint32_t map_index() const {
DCHECK(is_back_reference());
return ValueIndexBits::decode(bitfield_);
}
uint32_t large_object_index() const {
DCHECK(is_valid());
DCHECK(chunk_index() == 0);
return ChunkOffsetBits::decode(bitfield_);
DCHECK(is_back_reference());
return ValueIndexBits::decode(bitfield_);
}
uint32_t chunk_index() const {
DCHECK(is_valid());
DCHECK(is_back_reference());
return ChunkIndexBits::decode(bitfield_);
}
uint32_t reference() const {
DCHECK(is_valid());
uint32_t back_reference() const {
DCHECK(is_back_reference());
return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
}
uint32_t bitfield() const { return bitfield_; }
bool is_attached_reference() const {
return SpaceBits::decode(bitfield_) == kAttachedReferenceSpace;
}
int attached_reference_index() const {
DCHECK(is_attached_reference());
return ValueIndexBits::decode(bitfield_);
}
private:
static const uint32_t kInvalidValue = 0xFFFFFFFF;
static const uint32_t kSourceValue = 0xFFFFFFFE;
static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
static const uint32_t kDummyValue = 0xFFFFFFFC;
explicit SerializerReference(uint32_t bitfield) : bitfield_(bitfield) {}
inline static uint32_t Special(int value) {
return SpaceBits::encode(kSpecialValueSpace) |
ValueIndexBits::encode(value);
}
// We use the 32-bit bitfield to encode either a back reference, a special
// value, or an attached reference index.
// Back reference:
// [ Space index ] [ Chunk index ] [ Chunk offset ]
// [ LO_SPACE ] [ large object index ]
// Special value
// [ kSpecialValueSpace ] [ Special value index ]
// Attached reference
// [ kAttachedReferenceSpace ] [ Attached reference index ]
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
static const int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;
public:
static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
static const int kSpecialValueSpace = LAST_SPACE + 1;
static const int kAttachedReferenceSpace = kSpecialValueSpace + 1;
STATIC_ASSERT(kAttachedReferenceSpace < (1 << kSpaceTagSize));
private:
static const int kInvalidValue = 0;
static const int kDummyValue = 1;
// The chunk offset can also be used to encode the index of special values.
class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
class ChunkIndexBits
: public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
class SpaceBits
: public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
};
class ValueIndexBits : public BitField<uint32_t, 0, kValueIndexSize> {};
STATIC_ASSERT(ChunkIndexBits::kNext == ValueIndexBits::kNext);
class SpaceBits : public BitField<int, kValueIndexSize, kSpaceTagSize> {};
STATIC_ASSERT(SpaceBits::kNext == 32);
uint32_t bitfield_;
};
friend class SerializerReferenceMap;
};
// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
class BackReferenceMap : public AddressMapBase {
class SerializerReferenceMap : public AddressMapBase {
public:
BackReferenceMap()
: no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
~BackReferenceMap() { delete map_; }
SerializerReferenceMap()
: no_allocation_(),
map_(base::HashMap::PointersMatch),
attached_reference_index_(0) {}
BackReference Lookup(HeapObject* obj) {
HashMap::Entry* entry = LookupEntry(map_, obj, false);
return entry ? BackReference(GetValue(entry)) : BackReference();
SerializerReference Lookup(HeapObject* obj) {
base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
return entry ? SerializerReference(GetValue(entry)) : SerializerReference();
}
void Add(HeapObject* obj, BackReference b) {
void Add(HeapObject* obj, SerializerReference b) {
DCHECK(b.is_valid());
DCHECK_NULL(LookupEntry(map_, obj, false));
HashMap::Entry* entry = LookupEntry(map_, obj, true);
SetValue(entry, b.bitfield());
}
void AddSourceString(String* string) {
Add(string, BackReference::SourceReference());
DCHECK_NULL(LookupEntry(&map_, obj, false));
base::HashMap::Entry* entry = LookupEntry(&map_, obj, true);
SetValue(entry, b.bitfield_);
}
void AddGlobalProxy(HeapObject* global_proxy) {
Add(global_proxy, BackReference::GlobalProxyReference());
SerializerReference AddAttachedReference(HeapObject* attached_reference) {
SerializerReference reference =
SerializerReference::AttachedReference(attached_reference_index_++);
Add(attached_reference, reference);
return reference;
}
private:
DisallowHeapAllocation no_allocation_;
HashMap* map_;
DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
base::HashMap map_;
int attached_reference_index_;
DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
} // namespace internal
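The comment block inside the new SerializerReference spells out the packing: the top bits hold a space tag, and the low bits hold either a chunk index plus chunk offset (a back reference) or a single value index (special and attached references). A standalone sketch of that packing, using illustrative field widths instead of V8's BitField helpers and real constants, and ignoring the object-alignment shift applied to actual chunk offsets:
#include <cassert>
#include <cstdint>
// Illustrative widths only; the real values derive from kPageSizeBits,
// kObjectAlignmentBits and kSpaceTagSize.
const int kSpaceTagSize = 3;
const int kChunkOffsetSize = 16;
const int kChunkIndexSize = 32 - kSpaceTagSize - kChunkOffsetSize;  // 13
const int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;     // 29
// Back reference: [ space | chunk index | chunk offset ].
uint32_t EncodeBackReference(uint32_t space, uint32_t chunk_index,
                             uint32_t chunk_offset) {
  return (space << kValueIndexSize) | (chunk_index << kChunkOffsetSize) |
         chunk_offset;
}
// Special or attached reference: [ space | value index ].
uint32_t EncodeValueIndex(uint32_t space, uint32_t index) {
  return (space << kValueIndexSize) | index;
}
int main() {
  uint32_t ref = EncodeBackReference(/*space=*/2, /*chunk_index=*/5,
                                     /*chunk_offset=*/0x1234);
  assert((ref >> kValueIndexSize) == 2);                               // space tag
  assert(((ref >> kChunkOffsetSize) & ((1u << kChunkIndexSize) - 1)) == 5);
  assert((ref & ((1u << kChunkOffsetSize) - 1)) == 0x1234);
  uint32_t attached = EncodeValueIndex(/*space=*/7, /*index=*/42);
  assert((attached & ((1u << kValueIndexSize) - 1)) == 42);
  return 0;
}
Keeping the space tag in the same bit position for every variant is what lets predicates such as is_back_reference() and is_attached_reference() be answered by decoding the tag alone.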

3
deps/v8/src/allocation-site-scopes.cc

@@ -3,6 +3,9 @@
// found in the LICENSE file.
#include "src/allocation-site-scopes.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {

2
deps/v8/src/allocation-site-scopes.h

@@ -5,10 +5,8 @@
#ifndef V8_ALLOCATION_SITE_SCOPES_H_
#define V8_ALLOCATION_SITE_SCOPES_H_
#include "src/ast/ast.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/zone.h"
namespace v8 {
namespace internal {

11
deps/v8/src/allocation.cc

@@ -46,17 +46,6 @@ void Embedded::operator delete(void* p) {
UNREACHABLE();
}
void* AllStatic::operator new(size_t size) {
UNREACHABLE();
return invalid;
}
void AllStatic::operator delete(void* p) {
UNREACHABLE();
}
#endif

6
deps/v8/src/allocation.h

@@ -45,12 +45,12 @@ class Embedded {
#endif
// Superclass for classes only using statics.
// Superclass for classes only using static method functions.
// The subclass of AllStatic cannot be instantiated at all.
class AllStatic {
#ifdef DEBUG
public:
void* operator new(size_t size);
void operator delete(void* p);
AllStatic() = delete;
#endif
};
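The DEBUG-only deleted constructor replaces the old unreachable operator new/delete overrides: once the constructor is deleted, a subclass meant to hold only static members cannot be instantiated at all, and the mistake surfaces at compile time rather than at runtime. A minimal sketch of the idiom with invented class names:
// Stand-in for AllStatic: no instances, only static members.
class AllStaticExample {
 public:
  AllStaticExample() = delete;
};
class MathHelpers : public AllStaticExample {
 public:
  static int Square(int x) { return x * x; }
};
int main() {
  // MathHelpers m;  // would not compile: the base constructor is deleted,
  //                 // so no default constructor is available
  return MathHelpers::Square(3) == 9 ? 0 : 1;
}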

116
deps/v8/src/api-arguments-inl.h

@@ -0,0 +1,116 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/api-arguments.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
F(AccessorNameGetterCallback, "get", v8::Value, Object) \
F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
Handle<Name> name) { \
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
isolate, &tracing::TraceEventStatsTable::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, \
ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
f(v8::Utils::ToLocal(name), info); \
return GetReturnValue<InternalReturn>(isolate); \
}
FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
#undef WRITE_CALL_1_NAME
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
uint32_t index) { \
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
isolate, &tracing::TraceEventStatsTable::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
holder(), index)); \
f(index, info); \
return GetReturnValue<InternalReturn>(isolate); \
}
FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
#undef WRITE_CALL_1_INDEX
Handle<Object> PropertyCallbackArguments::Call(
GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate,
&tracing::TraceEventStatsTable::GenericNamedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
return GetReturnValue<Object>(isolate);
}
Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
uint32_t index,
Handle<Object> value) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::IndexedPropertySetterCallback);
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate, &tracing::TraceEventStatsTable::IndexedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), info);
return GetReturnValue<Object>(isolate);
}
void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
Handle<Name> name, Handle<Object> value) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::AccessorNameSetterCallback);
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate, &tracing::TraceEventStatsTable::AccessorNameSetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<void> info(begin());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
} // namespace internal
} // namespace v8
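Each generated Call() above wraps the embedder callback in the same set of scope objects (runtime-call timer, trace-event scope, VM state, external-callback scope), so the enter/exit bookkeeping runs on every return path. A reduced sketch of that RAII wrapping shape, with an invented timer class rather than V8's RuntimeCallTimerScope:
#include <chrono>
#include <cstdio>
// Prints how long the wrapped call took; the destructor covers every exit path.
class ScopedCallTimer {
 public:
  explicit ScopedCallTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedCallTimer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s took %lld us\n", name_, static_cast<long long>(us));
  }
 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};
int CallUserCallback(int (*callback)(int), int arg) {
  ScopedCallTimer timer("callback");  // scope object, like the macro bodies above
  return callback(arg);
}
int main() {
  return CallUserCallback([](int x) { return x + 1; }, 41) == 42 ? 0 : 1;
}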

12
deps/v8/src/api-arguments.cc

@@ -4,15 +4,20 @@
#include "src/api-arguments.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate, &internal::tracing::TraceEventStatsTable::FunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_,
is_construct_call_);
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
f(info);
return GetReturnValue<Object>(isolate);
}
@@ -20,6 +25,9 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
isolate, &internal::tracing::TraceEventStatsTable::PropertyCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Array> info(begin());

105
deps/v8/src/api-arguments.h

@@ -7,8 +7,6 @@
#include "src/api.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -58,7 +56,7 @@ Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
Object** handle = &this->begin()[kReturnValueOffset];
// Nothing was set, return empty handle as per previous behaviour.
if ((*handle)->IsTheHole()) return Handle<V>();
if ((*handle)->IsTheHole(isolate)) return Handle<V>();
Handle<V> result = Handle<V>::cast(Handle<Object>(handle));
result->VerifyApiCallResultType();
return result;
@@ -108,84 +106,24 @@ class PropertyCallbackArguments
*/
Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
F(AccessorNameGetterCallback, "get", v8::Value, Object) \
F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> Call(Function f, Handle<Name> name) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, \
ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
f(v8::Utils::ToLocal(name), info); \
return GetReturnValue<InternalReturn>(isolate); \
}
inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
inline Handle<Object> Call(GenericNamedPropertyQueryCallback f,
Handle<Name> name);
inline Handle<Object> Call(GenericNamedPropertyDeleterCallback f,
Handle<Name> name);
FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
#undef WRITE_CALL_1_NAME
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> Call(Function f, uint32_t index) { \
Isolate* isolate = this->isolate(); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
holder(), index)); \
f(index, info); \
return GetReturnValue<InternalReturn>(isolate); \
}
inline Handle<Object> Call(IndexedPropertyGetterCallback f, uint32_t index);
inline Handle<Object> Call(IndexedPropertyQueryCallback f, uint32_t index);
inline Handle<Object> Call(IndexedPropertyDeleterCallback f, uint32_t index);
FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
#undef WRITE_CALL_1_INDEX
Handle<Object> Call(GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
return GetReturnValue<Object>(isolate);
}
inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
Handle<Name> name, Handle<Object> value);
Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
Handle<Object> value) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), info);
return GetReturnValue<Object>(isolate);
}
inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
Handle<Object> value);
void Call(AccessorNameSetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<void> info(begin());
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
Handle<Object> value);
private:
inline JSObject* holder() {
@@ -206,19 +144,19 @@ class FunctionCallbackArguments
static const int kIsolateIndex = T::kIsolateIndex;
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
static const int kNewTargetIndex = T::kNewTargetIndex;
FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
internal::HeapObject* callee,
internal::Object* holder, internal::Object** argv,
int argc, bool is_construct_call)
: Super(isolate),
argv_(argv),
argc_(argc),
is_construct_call_(is_construct_call) {
internal::Object* holder,
internal::HeapObject* new_target,
internal::Object** argv, int argc)
: Super(isolate), argv_(argv), argc_(argc) {
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kNewTargetIndex] = new_target;
values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
@@ -245,7 +183,6 @@ class FunctionCallbackArguments
private:
internal::Object** argv_;
int argc_;
bool is_construct_call_;
};
} // namespace internal

10
deps/v8/src/api-experimental.cc

@@ -76,6 +76,10 @@ FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalField(
return FromApi(this)->LoadInternalField(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadInternalFieldUnchecked(
ValueId value, int field_no) {
return FromApi(this)->LoadInternalFieldUnchecked(value, field_no);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::LoadValue(ValueId value_id,
int offset) {
@@ -88,6 +92,9 @@ FastAccessorBuilder::ValueId FastAccessorBuilder::LoadObject(ValueId value_id,
return FromApi(this)->LoadObject(value_id, offset);
}
FastAccessorBuilder::ValueId FastAccessorBuilder::ToSmi(ValueId value_id) {
return FromApi(this)->ToSmi(value_id);
}
void FastAccessorBuilder::ReturnValue(ValueId value) {
FromApi(this)->ReturnValue(value);
@@ -113,6 +120,9 @@ void FastAccessorBuilder::SetLabel(LabelId label_id) {
FromApi(this)->SetLabel(label_id);
}
void FastAccessorBuilder::Goto(LabelId label_id) {
FromApi(this)->Goto(label_id);
}
void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {

559
deps/v8/src/api-natives.cc

@@ -15,23 +15,44 @@ namespace internal {
namespace {
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> data,
bool is_hidden_prototype);
class InvokeScope {
public:
explicit InvokeScope(Isolate* isolate) : save_context_(isolate) {}
~InvokeScope() {
Isolate* isolate = save_context_.isolate();
bool has_exception = isolate->has_pending_exception();
if (has_exception) {
isolate->ReportPendingMessages();
} else {
isolate->clear_pending_message();
}
}
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
Handle<Name> name = Handle<Name>());
private:
SaveContext save_context_;
};
enum class CacheCheck { kCheck, kSkip };
MaybeHandle<JSObject> InstantiateObject(
Isolate* isolate, Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target, CacheCheck cache_check = CacheCheck::kCheck,
bool is_hidden_prototype = false);
MaybeHandle<JSFunction> InstantiateFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> data,
CacheCheck cache_check = CacheCheck::kCheck,
Handle<Name> name = Handle<Name>());
MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
Handle<Name> name = Handle<Name>()) {
if (data->IsFunctionTemplateInfo()) {
return InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(data), name);
Handle<FunctionTemplateInfo>::cast(data),
CacheCheck::kCheck, name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
false);
Handle<JSReceiver>());
} else {
return data;
}
@@ -152,25 +173,6 @@ Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
return nullptr;
}
// Returns parent function template or null.
FunctionTemplateInfo* GetParent(FunctionTemplateInfo* data) {
Object* parent = data->parent_template();
return parent->IsUndefined() ? nullptr : FunctionTemplateInfo::cast(parent);
}
// Starting from given object template's constructor walk up the inheritance
// chain till a function template that has an instance template is found.
ObjectTemplateInfo* GetParent(ObjectTemplateInfo* data) {
Object* maybe_ctor = data->constructor();
if (maybe_ctor->IsUndefined()) return nullptr;
FunctionTemplateInfo* ctor = FunctionTemplateInfo::cast(maybe_ctor);
while (true) {
ctor = GetParent(ctor);
if (ctor == nullptr) return nullptr;
Object* maybe_obj = ctor->instance_template();
if (!maybe_obj->IsUndefined()) return ObjectTemplateInfo::cast(maybe_obj);
}
}
template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
@@ -184,15 +186,11 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
int max_number_of_properties = 0;
TemplateInfoT* info = *data;
while (info != nullptr) {
if (!info->property_accessors()->IsUndefined()) {
Object* props = info->property_accessors();
if (!props->IsUndefined()) {
Handle<Object> props_handle(props, isolate);
NeanderArray props_array(props_handle);
max_number_of_properties += props_array.length();
}
Object* props = info->property_accessors();
if (!props->IsUndefined(isolate)) {
max_number_of_properties += TemplateList::cast(props)->length();
}
info = GetParent(info);
info = info->GetParent(isolate);
}
if (max_number_of_properties > 0) {
@@ -204,12 +202,12 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
info = *data;
while (info != nullptr) {
// Accumulate accessors.
if (!info->property_accessors()->IsUndefined()) {
Handle<Object> props(info->property_accessors(), isolate);
valid_descriptors =
AccessorInfo::AppendUnique(props, array, valid_descriptors);
Object* maybe_properties = info->property_accessors();
if (!maybe_properties->IsUndefined(isolate)) {
valid_descriptors = AccessorInfo::AppendUnique(
handle(maybe_properties, isolate), array, valid_descriptors);
}
info = GetParent(info);
info = info->GetParent(isolate);
}
// Install accumulated accessors.
@@ -219,29 +217,29 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
}
}
auto property_list = handle(data->property_list(), isolate);
if (property_list->IsUndefined()) return obj;
// TODO(dcarney): just use a FixedArray here.
NeanderArray properties(property_list);
if (properties.length() == 0) return obj;
Object* maybe_property_list = data->property_list();
if (maybe_property_list->IsUndefined(isolate)) return obj;
Handle<TemplateList> properties(TemplateList::cast(maybe_property_list),
isolate);
if (properties->length() == 0) return obj;
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
auto name = handle(Name::cast(properties.get(i++)), isolate);
auto bit = handle(properties.get(i++), isolate);
auto name = handle(Name::cast(properties->get(i++)), isolate);
Object* bit = properties->get(i++);
if (bit->IsSmi()) {
PropertyDetails details(Smi::cast(*bit));
PropertyDetails details(Smi::cast(bit));
PropertyAttributes attributes = details.attributes();
PropertyKind kind = details.kind();
if (kind == kData) {
auto prop_data = handle(properties.get(i++), isolate);
auto prop_data = handle(properties->get(i++), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
} else {
auto getter = handle(properties.get(i++), isolate);
auto setter = handle(properties.get(i++), isolate);
auto getter = handle(properties->get(i++), isolate);
auto setter = handle(properties->get(i++), isolate);
RETURN_ON_EXCEPTION(
isolate, DefineAccessorProperty(isolate, obj, name, getter, setter,
attributes, is_hidden_prototype),
@@ -250,12 +248,12 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
} else {
// Intrinsic data property --- Get appropriate value from the current
// context.
PropertyDetails details(Smi::cast(properties.get(i++)));
PropertyDetails details(Smi::cast(properties->get(i++)));
PropertyAttributes attributes = details.attributes();
DCHECK_EQ(kData, details.kind());
v8::Intrinsic intrinsic =
static_cast<v8::Intrinsic>(Smi::cast(properties.get(i++))->value());
static_cast<v8::Intrinsic>(Smi::cast(properties->get(i++))->value());
auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
@@ -266,106 +264,189 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
return obj;
}
void CacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number,
MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
int serial_number) {
DCHECK_LE(1, serial_number);
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
return fast_cache->GetValue<JSObject>(isolate, serial_number - 1);
} else {
Handle<UnseededNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
if (entry == UnseededNumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
}
}
void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<JSObject> object) {
auto cache = isolate->template_instantiations_cache();
auto new_cache =
UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
isolate->native_context()->set_template_instantiations_cache(*new_cache);
DCHECK_LE(1, serial_number);
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
Handle<FixedArray> new_cache =
FixedArray::SetAndGrow(fast_cache, serial_number - 1, object);
if (*new_cache != *fast_cache) {
isolate->native_context()->set_fast_template_instantiations_cache(
*new_cache);
}
} else {
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache =
UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
}
}
}
void UncacheTemplateInstantiation(Isolate* isolate, uint32_t serial_number) {
auto cache = isolate->template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
DCHECK(entry != UnseededNumberDictionary::kNotFound);
Handle<Object> result =
UnseededNumberDictionary::DeleteProperty(cache, entry);
USE(result);
DCHECK(result->IsTrue());
auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
isolate->native_context()->set_template_instantiations_cache(*new_cache);
void UncacheTemplateInstantiation(Isolate* isolate, int serial_number) {
DCHECK_LE(1, serial_number);
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
DCHECK(!fast_cache->get(serial_number - 1)->IsUndefined(isolate));
fast_cache->set_undefined(serial_number - 1);
} else {
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
DCHECK(entry != UnseededNumberDictionary::kNotFound);
Handle<Object> result =
UnseededNumberDictionary::DeleteProperty(cache, entry);
USE(result);
DCHECK(result->IsTrue(isolate));
auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
}
}
bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo* info,
JSReceiver* new_target) {
DisallowHeapAllocation no_gc;
if (!new_target->IsJSFunction()) return false;
JSFunction* fun = JSFunction::cast(new_target);
if (fun->shared()->function_data() != info->constructor()) return false;
if (info->immutable_proto()) return false;
return fun->context()->native_context() == isolate->raw_native_context();
}
MaybeHandle<JSObject> InstantiateObjectWithInvokeScope(
Isolate* isolate, Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target) {
InvokeScope invoke_scope(isolate);
return InstantiateObject(isolate, info, new_target, CacheCheck::kSkip);
}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target,
CacheCheck cache_check,
bool is_hidden_prototype) {
Handle<JSFunction> constructor;
int serial_number = Smi::cast(info->serial_number())->value();
if (!new_target.is_null()) {
if (IsSimpleInstantiation(isolate, *info, *new_target)) {
constructor = Handle<JSFunction>::cast(new_target);
} else {
// Disable caching for subclass instantiation.
serial_number = 0;
}
}
// Fast path.
Handle<JSObject> result;
uint32_t serial_number =
static_cast<uint32_t>(Smi::cast(info->serial_number())->value());
if (serial_number) {
// Probe cache.
auto cache = isolate->template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
if (entry != UnseededNumberDictionary::kNotFound) {
Object* boilerplate = cache->ValueAt(entry);
result = handle(JSObject::cast(boilerplate), isolate);
if (serial_number && cache_check == CacheCheck::kCheck) {
if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return isolate->factory()->CopyJSObject(result);
}
}
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
auto constructor = handle(info->constructor(), isolate);
Handle<JSFunction> cons;
if (constructor->IsUndefined()) {
cons = isolate->object_function();
} else {
auto cons_templ = Handle<FunctionTemplateInfo>::cast(constructor);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
if (constructor.is_null()) {
Object* maybe_constructor_info = info->constructor();
if (maybe_constructor_info->IsUndefined(isolate)) {
constructor = isolate->object_function();
} else {
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
Handle<FunctionTemplateInfo> cons_templ(
FunctionTemplateInfo::cast(maybe_constructor_info), isolate);
Handle<JSFunction> tmp_constructor;
ASSIGN_RETURN_ON_EXCEPTION(isolate, tmp_constructor,
InstantiateFunction(isolate, cons_templ),
JSObject);
constructor = scope.CloseAndEscape(tmp_constructor);
}
if (new_target.is_null()) new_target = constructor;
}
auto object = isolate->factory()->NewJSObject(cons);
Handle<JSObject> object;
ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
JSObject::New(constructor, new_target), JSObject);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
ConfigureInstance(isolate, object, info, is_hidden_prototype),
JSFunction);
// TODO(dcarney): is this necessary?
ConfigureInstance(isolate, object, info, is_hidden_prototype), JSObject);
if (info->immutable_proto()) {
JSObject::SetImmutableProto(object);
}
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
if (serial_number) {
CacheTemplateInstantiation(isolate, serial_number, result);
result = isolate->factory()->CopyJSObject(result);
}
return scope.CloseAndEscape(result);
return result;
}
MaybeHandle<JSFunction> InstantiateFunctionWithInvokeScope(
Isolate* isolate, Handle<FunctionTemplateInfo> info) {
InvokeScope invoke_scope(isolate);
return InstantiateFunction(isolate, info, CacheCheck::kSkip);
}
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
CacheCheck cache_check,
Handle<Name> name) {
uint32_t serial_number =
static_cast<uint32_t>(Smi::cast(data->serial_number())->value());
if (serial_number) {
// Probe cache.
auto cache = isolate->template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
if (entry != UnseededNumberDictionary::kNotFound) {
Object* element = cache->ValueAt(entry);
return handle(JSFunction::cast(element), isolate);
int serial_number = Smi::cast(data->serial_number())->value();
if (serial_number && cache_check == CacheCheck::kCheck) {
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return Handle<JSFunction>::cast(result);
}
}
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
Handle<JSObject> prototype;
if (!data->remove_prototype()) {
auto prototype_templ = handle(data->prototype_template(), isolate);
if (prototype_templ->IsUndefined()) {
Object* prototype_templ = data->prototype_template();
if (prototype_templ->IsUndefined(isolate)) {
prototype = isolate->factory()->NewJSObject(isolate->object_function());
} else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
InstantiateObject(isolate,
Handle<ObjectTemplateInfo>::cast(prototype_templ),
data->hidden_prototype()),
InstantiateObject(
isolate,
handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
Handle<JSReceiver>(), CacheCheck::kCheck,
data->hidden_prototype()),
JSFunction);
}
auto parent = handle(data->parent_template(), isolate);
if (!parent->IsUndefined()) {
Object* parent = data->parent_template();
if (!parent->IsUndefined(isolate)) {
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
Handle<JSFunction> parent_instance;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, parent_instance,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(parent)),
InstantiateFunction(
isolate, handle(FunctionTemplateInfo::cast(parent), isolate)),
JSFunction);
// TODO(dcarney): decide what to do here.
Handle<Object> parent_prototype;
@@ -379,7 +460,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
MaybeHandle<JSFunction>());
}
}
auto function = ApiNatives::CreateApiFunction(
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
isolate, data, prototype, ApiNatives::JavaScriptObjectType);
if (!name.is_null() && name->IsString()) {
function->shared()->set_name(*name);
@@ -388,7 +469,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, function);
}
auto result =
MaybeHandle<JSObject> result =
ConfigureInstance(isolate, function, data, data->hidden_prototype());
if (result.is_null()) {
// Uncache on error.
@@ -397,65 +478,89 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
return MaybeHandle<JSFunction>();
}
return scope.CloseAndEscape(function);
return function;
}
class InvokeScope {
public:
explicit InvokeScope(Isolate* isolate)
: isolate_(isolate), save_context_(isolate) {}
~InvokeScope() {
bool has_exception = isolate_->has_pending_exception();
if (has_exception) {
isolate_->ReportPendingMessages();
} else {
isolate_->clear_pending_message();
}
}
private:
Isolate* isolate_;
SaveContext save_context_;
};
void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
int length, Handle<Object>* data) {
auto list = handle(templ->property_list(), isolate);
if (list->IsUndefined()) {
list = NeanderArray(isolate).value();
templ->set_property_list(*list);
Object* maybe_list = templ->property_list();
Handle<TemplateList> list;
if (maybe_list->IsUndefined(isolate)) {
list = TemplateList::New(isolate, length);
} else {
list = handle(TemplateList::cast(maybe_list), isolate);
}
templ->set_number_of_properties(templ->number_of_properties() + 1);
NeanderArray array(list);
for (int i = 0; i < length; i++) {
Handle<Object> value =
data[i].is_null()
? Handle<Object>::cast(isolate->factory()->undefined_value())
: data[i];
array.add(isolate, value);
list = TemplateList::Add(isolate, list, value);
}
templ->set_property_list(*list);
}
} // namespace
MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
Handle<FunctionTemplateInfo> data) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
return ::v8::internal::InstantiateFunction(isolate, data);
Handle<FunctionTemplateInfo> info) {
Isolate* isolate = info->GetIsolate();
int serial_number = Smi::cast(info->serial_number())->value();
if (serial_number) {
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return Handle<JSFunction>::cast(result);
}
}
return InstantiateFunctionWithInvokeScope(isolate, info);
}
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
Handle<ObjectTemplateInfo> info, Handle<JSReceiver> new_target) {
Isolate* isolate = info->GetIsolate();
int serial_number = Smi::cast(info->serial_number())->value();
if (serial_number && !new_target.is_null() &&
IsSimpleInstantiation(isolate, *info, *new_target)) {
// Fast path.
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
return isolate->factory()->CopyJSObject(result);
}
}
return InstantiateObjectWithInvokeScope(isolate, info, new_target);
}
MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
return ::v8::internal::InstantiateObject(isolate, data, false);
}
Handle<FunctionTemplateInfo> constructor(
FunctionTemplateInfo::cast(data->constructor()));
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, constructor);
Handle<Map> initial_map = isolate->factory()->CreateSloppyFunctionMap(
FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Handle<JSFunction> object_function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
initial_map, shared, isolate->factory()->undefined_value());
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->internal_field_count() * kPointerSize,
FAST_HOLEY_SMI_ELEMENTS);
JSFunction::SetInitialMap(object_function, object_map,
isolate->factory()->null_value());
object_map->set_is_access_check_needed(true);
object_map->set_is_callable();
object_map->set_is_constructor(true);
Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
return object;
}
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
@@ -498,108 +603,91 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<TemplateInfo> info,
Handle<AccessorInfo> property) {
auto list = handle(info->property_accessors(), isolate);
if (list->IsUndefined()) {
list = NeanderArray(isolate).value();
info->set_property_accessors(*list);
Object* maybe_list = info->property_accessors();
Handle<TemplateList> list;
if (maybe_list->IsUndefined(isolate)) {
list = TemplateList::New(isolate, 1);
} else {
list = handle(TemplateList::cast(maybe_list), isolate);
}
NeanderArray array(list);
array.add(isolate, property);
list = TemplateList::Add(isolate, list, property);
info->set_property_accessors(*list);
}
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type) {
Handle<Code> code;
if (obj->call_code()->IsCallHandlerInfo() &&
CallHandlerInfo::cast(obj->call_code())->fast_handler()->IsCode()) {
code = isolate->builtins()->HandleFastApiCall();
} else {
code = isolate->builtins()->HandleApiCall();
}
Handle<Code> construct_stub =
prototype.is_null() ? isolate->builtins()->ConstructedNonConstructable()
: isolate->builtins()->JSConstructStubApi();
obj->set_instantiated(true);
Handle<JSFunction> result;
if (obj->remove_prototype()) {
result = isolate->factory()->NewFunctionWithoutPrototype(
isolate->factory()->empty_string(), code);
} else {
int internal_field_count = 0;
if (!obj->instance_template()->IsUndefined()) {
Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->instance_template()));
internal_field_count =
Smi::cast(instance_template->internal_field_count())->value();
}
// TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
// JSObject::GetHeaderSize.
int instance_size = kPointerSize * internal_field_count;
InstanceType type;
switch (instance_type) {
case JavaScriptObjectType:
if (!obj->needs_access_check() &&
obj->named_property_handler()->IsUndefined() &&
obj->indexed_property_handler()->IsUndefined()) {
type = JS_OBJECT_TYPE;
} else {
type = JS_SPECIAL_API_OBJECT_TYPE;
}
instance_size += JSObject::kHeaderSize;
break;
case GlobalObjectType:
type = JS_GLOBAL_OBJECT_TYPE;
instance_size += JSGlobalObject::kSize;
break;
case GlobalProxyType:
type = JS_GLOBAL_PROXY_TYPE;
instance_size += JSGlobalProxy::kSize;
break;
default:
UNREACHABLE();
type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
result = isolate->factory()->NewFunction(
isolate->factory()->empty_string(), code, prototype, type,
instance_size, obj->read_only_prototype(), true);
}
result->shared()->set_length(obj->length());
Handle<Object> class_name(obj->class_name(), isolate);
if (class_name->IsString()) {
result->shared()->set_instance_class_name(*class_name);
result->shared()->set_name(*class_name);
}
result->shared()->set_api_func_data(*obj);
result->shared()->set_construct_stub(*construct_stub);
result->shared()->DontAdaptArguments();
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj);
Handle<JSFunction> result =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
if (obj->remove_prototype()) {
result->set_map(*isolate->sloppy_function_without_prototype_map());
DCHECK(prototype.is_null());
DCHECK(result->shared()->IsApiFunction());
DCHECK(!result->has_initial_map());
DCHECK(!result->has_prototype());
DCHECK(!result->IsConstructor());
return result;
}
#ifdef DEBUG
LookupIterator it(handle(JSObject::cast(result->prototype())),
isolate->factory()->constructor_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
DCHECK(it.IsFound());
DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
#endif
// Down from here is only valid for API functions that can be used as a
// constructor (don't set the "remove prototype" flag).
Handle<Map> map(result->initial_map());
if (obj->read_only_prototype()) {
result->set_map(*isolate->sloppy_function_with_readonly_prototype_map());
}
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
} else {
JSObject::AddProperty(Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
}
int internal_field_count = 0;
if (!obj->instance_template()->IsUndefined(isolate)) {
Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->instance_template()));
internal_field_count = instance_template->internal_field_count();
}
// TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
// JSObject::GetHeaderSize.
int instance_size = kPointerSize * internal_field_count;
InstanceType type;
switch (instance_type) {
case JavaScriptObjectType:
if (!obj->needs_access_check() &&
obj->named_property_handler()->IsUndefined(isolate) &&
obj->indexed_property_handler()->IsUndefined(isolate)) {
type = JS_API_OBJECT_TYPE;
} else {
type = JS_SPECIAL_API_OBJECT_TYPE;
}
instance_size += JSObject::kHeaderSize;
break;
case GlobalObjectType:
type = JS_GLOBAL_OBJECT_TYPE;
instance_size += JSGlobalObject::kSize;
break;
case GlobalProxyType:
type = JS_GLOBAL_PROXY_TYPE;
instance_size += JSGlobalProxy::kSize;
break;
default:
UNREACHABLE();
type = JS_OBJECT_TYPE; // Keep the compiler happy.
break;
}
Handle<Map> map =
isolate->factory()->NewMap(type, instance_size, FAST_HOLEY_SMI_ELEMENTS);
JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
// Mark as undetectable if needed.
if (obj->undetectable()) {
@@ -612,20 +700,19 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
// Set interceptor information in the map.
if (!obj->named_property_handler()->IsUndefined()) {
if (!obj->named_property_handler()->IsUndefined(isolate)) {
map->set_has_named_interceptor();
}
if (!obj->indexed_property_handler()->IsUndefined()) {
if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
map->set_has_indexed_interceptor();
}
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined()) {
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
map->set_is_callable();
map->set_is_constructor(true);
}
DCHECK(result->shared()->IsApiFunction());
return result;
}
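The rewritten caching code splits template instantiations into two tiers: serial numbers up to kFastTemplateInstantiationsCacheSize index directly into a FixedArray, and larger ones fall back to an UnseededNumberDictionary. A standalone sketch of the same two-tier lookup, with std::vector and std::unordered_map standing in for the V8 containers and an arbitrary size limit:
#include <cassert>
#include <optional>
#include <unordered_map>
#include <vector>
class TwoTierCache {
 public:
  static const int kFastSize = 1024;  // stand-in for the fast-cache limit
  void Put(int serial_number, int value) {
    assert(serial_number >= 1);
    if (serial_number <= kFastSize) {
      if (static_cast<size_t>(serial_number) > fast_.size()) {
        fast_.resize(serial_number);   // grow the flat array on demand
      }
      fast_[serial_number - 1] = value;
    } else {
      slow_[serial_number] = value;    // sparse, large serial numbers
    }
  }
  std::optional<int> Probe(int serial_number) const {
    assert(serial_number >= 1);
    if (serial_number <= kFastSize) {
      if (static_cast<size_t>(serial_number) > fast_.size()) return std::nullopt;
      return fast_[serial_number - 1];
    }
    auto it = slow_.find(serial_number);
    if (it == slow_.end()) return std::nullopt;
    return it->second;
  }
 private:
  std::vector<std::optional<int>> fast_;  // index = serial_number - 1
  std::unordered_map<int, int> slow_;
};
int main() {
  TwoTierCache cache;
  cache.Put(3, 30);
  cache.Put(5000, 50);
  assert(cache.Probe(3).value() == 30);
  assert(cache.Probe(5000).value() == 50);
  assert(!cache.Probe(7).has_value());
  return 0;
}
The split keeps the common case, small densely allocated serial numbers, at array-index cost while still handling arbitrarily large serials.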

4
deps/v8/src/api-natives.h

@@ -23,6 +23,10 @@ class ApiNatives {
Handle<FunctionTemplateInfo> data);
MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target = Handle<JSReceiver>());
MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data);
enum ApiInstanceType {

2122
deps/v8/src/api.cc

File diff suppressed because it is too large

96
deps/v8/src/api.h

@@ -26,72 +26,6 @@ class Consts {
};
};
// Utilities for working with neander-objects, primitive
// env-independent JSObjects used by the api.
class NeanderObject {
public:
explicit NeanderObject(v8::internal::Isolate* isolate, int size);
explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
explicit inline NeanderObject(v8::internal::Object* obj);
inline v8::internal::Object* get(int index);
inline void set(int index, v8::internal::Object* value);
inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
int size();
private:
v8::internal::Handle<v8::internal::JSObject> value_;
};
// Utilities for working with neander-arrays, a simple extensible
// array abstraction built on neander-objects.
class NeanderArray {
public:
explicit NeanderArray(v8::internal::Isolate* isolate);
explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
inline v8::internal::Handle<v8::internal::JSObject> value() {
return obj_.value();
}
void add(internal::Isolate* isolate,
v8::internal::Handle<v8::internal::Object> value);
int length();
v8::internal::Object* get(int index);
// Change the value at an index to undefined value. If the index is
// out of bounds, the request is ignored. Returns the old value.
void set(int index, v8::internal::Object* value);
private:
NeanderObject obj_;
};
NeanderObject::NeanderObject(v8::internal::Handle<v8::internal::Object> obj)
: value_(v8::internal::Handle<v8::internal::JSObject>::cast(obj)) { }
NeanderObject::NeanderObject(v8::internal::Object* obj)
: value_(v8::internal::Handle<v8::internal::JSObject>(
v8::internal::JSObject::cast(obj))) { }
NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
: obj_(obj) { }
v8::internal::Object* NeanderObject::get(int offset) {
DCHECK(value()->HasFastObjectElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
DCHECK(value_->HasFastObjectElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
@@ -184,9 +118,7 @@ class Utils {
if (!condition) Utils::ReportApiFailure(location, message);
return condition;
}
static Local<FunctionTemplate> ToFunctionTemplate(NeanderObject obj);
static Local<ObjectTemplate> ToObjectTemplate(NeanderObject obj);
static void ReportOOMFailure(const char* location, bool is_heap_oom);
static inline Local<Context> ToLocal(
v8::internal::Handle<v8::internal::Context> obj);
@@ -281,7 +213,9 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template<class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj) {
DCHECK(obj.is_null() || !obj->IsTheHole());
DCHECK(obj.is_null() ||
(obj->IsSmi() ||
!obj->IsTheHole(i::HeapObject::cast(*obj)->GetIsolate())));
return Local<To>(reinterpret_cast<To*>(obj.location()));
}
@@ -450,6 +384,7 @@ class HandleScopeImplementer {
blocks_(0),
entered_contexts_(0),
saved_contexts_(0),
microtask_context_(nullptr),
spare_(NULL),
call_depth_(0),
microtasks_depth_(0),
@@ -516,6 +451,10 @@ class HandleScopeImplementer {
// contexts have been entered.
inline Handle<Context> LastEnteredContext();
inline void EnterMicrotaskContext(Handle<Context> context);
inline void LeaveMicrotaskContext();
inline Handle<Context> MicrotaskContext();
inline void SaveContext(Context* context);
inline Context* RestoreContext();
inline bool HasSavedContexts();
@@ -534,6 +473,7 @@ class HandleScopeImplementer {
blocks_.Initialize(0);
entered_contexts_.Initialize(0);
saved_contexts_.Initialize(0);
microtask_context_ = nullptr;
spare_ = NULL;
last_handle_before_deferred_block_ = NULL;
call_depth_ = 0;
@@ -543,6 +483,7 @@ class HandleScopeImplementer {
DCHECK(blocks_.length() == 0);
DCHECK(entered_contexts_.length() == 0);
DCHECK(saved_contexts_.length() == 0);
DCHECK(!microtask_context_);
blocks_.Free();
entered_contexts_.Free();
saved_contexts_.Free();
@@ -562,6 +503,7 @@ class HandleScopeImplementer {
List<Context*> entered_contexts_;
// Used as a stack to keep track of saved contexts.
List<Context*> saved_contexts_;
Context* microtask_context_;
Object** spare_;
int call_depth_;
int microtasks_depth_;
@@ -634,6 +576,20 @@ Handle<Context> HandleScopeImplementer::LastEnteredContext() {
return Handle<Context>(entered_contexts_.last());
}
void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
DCHECK(!microtask_context_);
microtask_context_ = *context;
}
void HandleScopeImplementer::LeaveMicrotaskContext() {
DCHECK(microtask_context_);
microtask_context_ = nullptr;
}
Handle<Context> HandleScopeImplementer::MicrotaskContext() {
if (microtask_context_) return Handle<Context>(microtask_context_);
return Handle<Context>::null();
}
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {

42
deps/v8/src/arguments.h

@@ -79,22 +79,32 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#define CLOBBER_DOUBLE_REGISTERS()
#endif
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
CLOBBER_DOUBLE_REGISTERS(); \
Type value; \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), "V8." #Name); \
Arguments args(args_length, args_object); \
if (FLAG_runtime_call_stats) { \
RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
RuntimeCallTimerScope timer(isolate, &stats->Name); \
value = __RT_impl_##Name(args, isolate); \
} else { \
value = __RT_impl_##Name(args, isolate); \
} \
return value; \
} \
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
// TODO(cbruni): Convert the IsContext CHECK back to a DCHECK.
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name); \
Arguments args(args_length, args_object); \
TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED( \
isolate, &tracing::TraceEventStatsTable::Name); \
return __RT_impl_##Name(args, isolate); \
} \
\
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
CHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
CLOBBER_DOUBLE_REGISTERS(); \
if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
FLAG_runtime_call_stats)) { \
return Stats_##Name(args_length, args_object, isolate); \
} \
Arguments args(args_length, args_object); \
return __RT_impl_##Name(args, isolate); \
} \
\
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
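The new macro keeps the untimed path as lean as possible: the common case checks one rarely-true condition and falls straight through to the implementation, while all timing and tracing bookkeeping lives in a separate V8_NOINLINE Stats_##Name wrapper. A reduced sketch of that dispatch shape, using GCC/Clang builtins where the macro uses V8_UNLIKELY and V8_NOINLINE:
#include <cstdio>
static bool g_runtime_stats_enabled = false;   // stand-in for the runtime flag
static int AddImpl(int a, int b) { return a + b; }  // the real work
// Out-of-line slow path: this is where timer/trace scopes would be set up.
__attribute__((noinline)) static int Stats_Add(int a, int b) {
  std::printf("runtime call: Add\n");
  return AddImpl(a, b);
}
int Add(int a, int b) {
  if (__builtin_expect(g_runtime_stats_enabled, 0)) {  // unlikely branch
    return Stats_Add(a, b);
  }
  return AddImpl(a, b);  // fast path: no bookkeeping
}
int main() {
  int x = Add(1, 2);
  g_runtime_stats_enabled = true;
  int y = Add(3, 4);
  return (x == 3 && y == 7) ? 0 : 1;
}
Moving the slow path out of line keeps the hot entry point small enough to stay cheap when stats and tracing are both off.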

37
deps/v8/src/arm/assembler-arm-inl.h

@@ -46,9 +46,9 @@
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
bool CpuFeatures::SupportsSimd128() { return false; }
int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
@@ -71,11 +71,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@@ -104,33 +99,6 @@ int RelocInfo::target_address_size() {
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(isolate_, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
void RelocInfo::update_wasm_memory_reference(
Address old_base, Address new_base, size_t old_size, size_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK(old_base <= wasm_memory_reference() &&
wasm_memory_reference() < old_base + old_size);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
DCHECK(new_base <= updated_reference &&
updated_reference < new_base + new_size);
Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
icache_flush_mode);
}
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@ -156,6 +124,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@ -276,7 +245,7 @@ void RelocInfo::WipeOut() {
}
}
template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {

326
deps/v8/src/arm/assembler-arm.cc

@ -57,7 +57,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << ARMv8;
// ARMv8 always features VFP and NEON.
answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
answer |= 1u << SUDIV | 1u << MLS;
answer |= 1u << SUDIV;
}
#endif // CAN_USE_ARMV8_INSTRUCTIONS
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
@ -72,9 +72,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_NEON
if (FLAG_enable_neon) answer |= 1u << NEON;
#endif // CAN_USE_VFP32DREGS
if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
answer |= 1u << UNALIGNED_ACCESSES;
}
return answer;
}
@ -93,7 +90,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= 1u << ARMv8;
// ARMv8 always features VFP and NEON.
supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
supported_ |= 1u << SUDIV | 1u << MLS;
supported_ |= 1u << SUDIV;
if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
if (FLAG_enable_armv7) {
@ -104,8 +101,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
}
if (FLAG_enable_mls) supported_ |= 1u << MLS;
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
#else // __arm__
// Probe for additional features at runtime.
@ -119,14 +114,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
if (cpu.architecture() >= 7) {
if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
if (FLAG_enable_armv8 && cpu.architecture() >= 8) {
supported_ |= 1u << ARMv8;
}
if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
// Use movw/movt for QUALCOMM ARMv7 cores.
if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
@ -141,15 +134,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
// TODO(jkummerow): This is turned off as an experiment to see if it
// affects crash rates. Keep an eye on crash reports and either remove
// coherent cache support permanently, or re-enable it!
// supported_ |= 1u << COHERENT_CACHE;
}
#endif
DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
@ -212,18 +196,12 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
"ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
"UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
CpuFeatures::IsSupported(ARMv8),
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON),
CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(MLS),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
CpuFeatures::IsSupported(COHERENT_CACHE));
"ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
"MOVW_MOVT_IMMEDIATE_LOADS=%d",
CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
@ -255,6 +233,31 @@ bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
uint32_t RelocInfo::wasm_memory_size_reference() {
DCHECK(IsWasmMemorySizeReference(rmode_));
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
Address RelocInfo::wasm_global_reference() {
DCHECK(IsWasmGlobalReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
@ -266,7 +269,6 @@ Operand::Operand(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@ -463,17 +465,15 @@ const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
pending_32_bit_constants_(&pending_32_bit_constants_buffer_[0]),
pending_64_bit_constants_(&pending_64_bit_constants_buffer_[0]),
constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
positions_recorder_(this) {
pending_32_bit_constants_(),
pending_64_bit_constants_(),
constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits) {
pending_32_bit_constants_.reserve(kMinNumPendingConstants);
pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_constants_ = 0;
num_pending_64_bit_constants_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@ -486,26 +486,18 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
DCHECK(const_pool_blocked_nesting_ == 0);
if (pending_32_bit_constants_ != &pending_32_bit_constants_buffer_[0]) {
delete[] pending_32_bit_constants_;
}
if (pending_64_bit_constants_ != &pending_64_bit_constants_buffer_[0]) {
delete[] pending_64_bit_constants_;
}
}
void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.Finish();
// Emit constant pool if necessary.
int constant_pool_offset = 0;
if (FLAG_enable_embedded_constant_pool) {
constant_pool_offset = EmitEmbeddedConstantPool();
} else {
CheckConstPool(true, false);
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
DCHECK(pending_32_bit_constants_.empty());
DCHECK(pending_64_bit_constants_.empty());
}
// Set up code descriptor.
desc->buffer = buffer_;
@ -515,6 +507,8 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->constant_pool_size =
(constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
desc->origin = this;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
}
@ -828,6 +822,19 @@ void Assembler::target_at_put(int pos, int target_pos) {
// Load the position of the label relative to the generated code object
// pointer in a register.
// The existing code must be a single 24-bit label chain link, followed by
// nops encoding the destination register. See mov_label_offset.
// Extract the destination register from the first nop instructions.
Register dst =
Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
// In addition to the 24-bit label chain link, we expect to find one nop for
// ARMv7 and above, or two nops for ARMv6. See mov_label_offset.
DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
if (!CpuFeatures::IsSupported(ARMv7)) {
DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
}
// Here are the instructions we need to emit:
// For ARMv7: target24 => target16_1:target16_0
// movw dst, #target16_0
@ -837,10 +844,6 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_1 << 8
// orr dst, dst, #target8_2 << 16
// We extract the destination register from the emitted nop instruction.
Register dst = Register::from_code(
Instruction::RmValue(instr_at(pos + kInstrSize)));
DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
DCHECK(is_uint24(target24));
if (is_uint8(target24)) {
@ -1367,7 +1370,6 @@ void Assembler::b(int branch_offset, Condition cond) {
void Assembler::bl(int branch_offset, Condition cond) {
positions_recorder()->WriteRecordedPositions();
DCHECK((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
@ -1376,7 +1378,6 @@ void Assembler::bl(int branch_offset, Condition cond) {
void Assembler::blx(int branch_offset) { // v5 and above
positions_recorder()->WriteRecordedPositions();
DCHECK((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
@ -1386,14 +1387,12 @@ void Assembler::blx(int branch_offset) { // v5 and above
void Assembler::blx(Register target, Condition cond) { // v5 and above
positions_recorder()->WriteRecordedPositions();
DCHECK(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
positions_recorder()->WriteRecordedPositions();
DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@ -1501,9 +1500,6 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2,
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
if (dst.is(pc)) {
positions_recorder()->WriteRecordedPositions();
}
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
@ -1586,7 +1582,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
DCHECK(IsEnabled(MLS));
DCHECK(IsEnabled(ARMv7));
emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@ -1702,8 +1698,6 @@ void Assembler::usat(Register dst,
int satpos,
const Operand& src,
Condition cond) {
// v6 and above.
DCHECK(CpuFeatures::IsSupported(ARMv7));
DCHECK(!dst.is(pc) && !src.rm_.is(pc));
DCHECK((satpos >= 0) && (satpos <= 31));
DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@ -1994,9 +1988,6 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (dst.is(pc)) {
positions_recorder()->WriteRecordedPositions();
}
addrmod2(cond | B26 | L, dst, src);
}
@ -2038,7 +2029,6 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
void Assembler::ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(src.rm().is(no_reg));
DCHECK(!dst1.is(lr)); // r14.
DCHECK_EQ(0, dst1.code() % 2);
@ -2053,10 +2043,56 @@ void Assembler::strd(Register src1, Register src2,
DCHECK(!src1.is(lr)); // r14.
DCHECK_EQ(0, src1.code() % 2);
DCHECK_EQ(src1.code() + 1, src2.code());
DCHECK(IsEnabled(ARMv7));
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Load/Store exclusive instructions.
void Assembler::ldrex(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.75.
// cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
}
void Assembler::strex(Register src1, Register src2, Register dst,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.212.
// cond(31-28) | 00011000(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
// Rt(3-0)
emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
src2.code());
}
void Assembler::ldrexb(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.76.
// cond(31-28) | 00011101(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
0xf9f);
}
void Assembler::strexb(Register src1, Register src2, Register dst,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.213.
// cond(31-28) | 00011100(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
// Rt(3-0)
emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
0xf9 * B4 | src2.code());
}
void Assembler::ldrexh(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.78.
// cond(31-28) | 00011111(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
dst.code() * B12 | 0xf9f);
}
void Assembler::strexh(Register src1, Register src2, Register dst,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.215.
// cond(31-28) | 00011110(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
// Rt(3-0)
emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
0xf9 * B4 | src2.code());
}
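The new ldrex/strex family exposes the ARM load-exclusive/store-exclusive primitives that atomic read-modify-write sequences are built from. A minimal C++ illustration of the retry loop they implement, using std::atomic rather than the Assembler API so it runs on any host; on ARM each compare_exchange_weak lowers to one ldrex/strex pair, and a spurious failure corresponds to the store-exclusive reporting that the exclusive monitor was lost:

    #include <atomic>
    #include <cstdio>

    int AtomicIncrement(std::atomic<int>* cell) {
      int observed = cell->load(std::memory_order_relaxed);
      // Retry while the exclusive store (strex) fails, spuriously or because
      // another agent touched the cell between the ldrex and the strex.
      while (!cell->compare_exchange_weak(observed, observed + 1,
                                          std::memory_order_seq_cst,
                                          std::memory_order_relaxed)) {
        // observed was refreshed with the current value; try again.
      }
      return observed + 1;
    }

    int main() {
      std::atomic<int> counter{0};
      std::printf("%d\n", AtomicIncrement(&counter));  // prints 1
    }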
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
@ -2122,7 +2158,11 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
} else {
svc(kStopCode + kMaxStopCode, cond);
}
emit(reinterpret_cast<Instr>(msg));
// Do not embed the message string address! We used to do this, but that
// made snapshots created from position-independent executable builds
// non-deterministic.
// TODO(yangguo): remove this field entirely.
nop();
}
#else // def __arm__
if (cond != al) {
@ -3371,6 +3411,69 @@ void Assembler::vcmp(const SwVfpRegister src1, const float src2,
0x5 * B9 | B6);
}
void Assembler::vsel(Condition cond, const DwVfpRegister dst,
const DwVfpRegister src1, const DwVfpRegister src2) {
// cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
// vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
// 0(6) | M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = 1;
// VSEL has a special (restricted) condition encoding.
// eq(0b0000)... -> 0b00
// ge(0b1010)... -> 0b10
// gt(0b1100)... -> 0b11
// vs(0b0110)... -> 0b01
// No other conditions are supported.
int vsel_cond = (cond >> 30) & 0x3;
if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
// We can implement some other conditions by swapping the inputs.
DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
std::swap(vn, vm);
std::swap(n, m);
}
emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
}
void Assembler::vsel(Condition cond, const SwVfpRegister dst,
const SwVfpRegister src1, const SwVfpRegister src2) {
// cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
// vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
// 0(6) | M(5) | 0(4) | Vm(3-0)
DCHECK(CpuFeatures::IsSupported(ARMv8));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = 0;
// VSEL has a special (restricted) condition encoding.
// eq(0b0000)... -> 0b00
// ge(0b1010)... -> 0b10
// gt(0b1100)... -> 0b11
// vs(0b0110)... -> 0b01
// No other conditions are supported.
int vsel_cond = (cond >> 30) & 0x3;
if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
// We can implement some other conditions by swapping the inputs.
DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
std::swap(vn, vm);
std::swap(n, m);
}
emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
}
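The restricted VSEL condition field keeps only the top two bits of the four-bit ARM condition code, so eq/ne, vs/vc, ge/lt and gt/le collapse onto the same two-bit encoding and the second member of each pair is obtained by swapping the source operands. A small standalone check of that folding; the enum values mirror the ARM condition encodings used above:

    #include <cstdio>

    // ARM condition codes occupy bits 31..28 of an instruction; VSEL encodes
    // only the top two of those bits (bits 21..20 of the VSEL encoding).
    enum Condition : unsigned {
      eq = 0x0u << 28, ne = 0x1u << 28, vs = 0x6u << 28, vc = 0x7u << 28,
      ge = 0xAu << 28, lt = 0xBu << 28, gt = 0xCu << 28, le = 0xDu << 28,
    };

    unsigned VselCond(Condition cond) { return (cond >> 30) & 0x3; }

    int main() {
      // Prints eq=0 ne=0 vs=1 vc=1 ge=2 lt=2 gt=3 le=3: each unsupported
      // condition shares a code with its inverse, hence the operand swap.
      std::printf("eq=%u ne=%u vs=%u vc=%u ge=%u lt=%u gt=%u le=%u\n",
                  VselCond(eq), VselCond(ne), VselCond(vs), VselCond(vc),
                  VselCond(ge), VselCond(lt), VselCond(gt), VselCond(le));
    }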
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
@ -3745,8 +3848,8 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
// db is used to write raw data. The constant pool should be emitted or
// blocked before using db.
DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@ -3756,8 +3859,8 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
// dd is used to write raw data. The constant pool should be emitted or
// blocked before using dd.
DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@ -3767,8 +3870,8 @@ void Assembler::dd(uint32_t data) {
void Assembler::dq(uint64_t value) {
// dq is used to write raw data. The constant pool should be emitted or
// blocked before using dq.
DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
@ -3803,29 +3906,19 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
rmode != RelocInfo::STATEMENT_POSITION &&
rmode != RelocInfo::CONST_POOL && rmode != RelocInfo::NONE64);
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
!(serializer_enabled() || rmode < RelocInfo::CELL);
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value, sharing_ok);
} else {
DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
if (num_pending_32_bit_constants_ == 0) {
DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
} else if (num_pending_32_bit_constants_ == kMinNumPendingConstants &&
pending_32_bit_constants_ ==
&pending_32_bit_constants_buffer_[0]) {
// Inline buffer is full, switch to dynamically allocated buffer.
pending_32_bit_constants_ =
new ConstantPoolEntry[kMaxNumPending32Constants];
std::copy(&pending_32_bit_constants_buffer_[0],
&pending_32_bit_constants_buffer_[kMinNumPendingConstants],
&pending_32_bit_constants_[0]);
}
ConstantPoolEntry entry(position, value, sharing_ok);
pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
pending_32_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@ -3840,21 +3933,12 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
if (FLAG_enable_embedded_constant_pool) {
return constant_pool_builder_.AddEntry(position, value);
} else {
DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
if (num_pending_64_bit_constants_ == 0) {
DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
if (pending_64_bit_constants_.empty()) {
first_const_pool_64_use_ = position;
} else if (num_pending_64_bit_constants_ == kMinNumPendingConstants &&
pending_64_bit_constants_ ==
&pending_64_bit_constants_buffer_[0]) {
// Inline buffer is full, switch to dynamically allocated buffer.
pending_64_bit_constants_ =
new ConstantPoolEntry[kMaxNumPending64Constants];
std::copy(&pending_64_bit_constants_buffer_[0],
&pending_64_bit_constants_buffer_[kMinNumPendingConstants],
&pending_64_bit_constants_[0]);
}
ConstantPoolEntry entry(position, value);
pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
pending_64_bit_constants_.push_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@ -3867,8 +3951,8 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
void Assembler::BlockConstPoolFor(int instructions) {
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
DCHECK(pending_32_bit_constants_.empty());
DCHECK(pending_64_bit_constants_.empty());
return;
}
@ -3877,11 +3961,11 @@ void Assembler::BlockConstPoolFor(int instructions) {
// Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
int start = pc_limit + kInstrSize + 2 * kPointerSize;
DCHECK((num_pending_32_bit_constants_ == 0) ||
DCHECK(pending_32_bit_constants_.empty() ||
(start - first_const_pool_32_use_ +
num_pending_64_bit_constants_ * kDoubleSize <
pending_64_bit_constants_.size() * kDoubleSize <
kMaxDistToIntPool));
DCHECK((num_pending_64_bit_constants_ == 0) ||
DCHECK(pending_64_bit_constants_.empty() ||
(start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
no_const_pool_before_ = pc_limit;
@ -3896,8 +3980,8 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (FLAG_enable_embedded_constant_pool) {
// Should be a no-op if using an embedded constant pool.
DCHECK(num_pending_32_bit_constants_ == 0);
DCHECK(num_pending_64_bit_constants_ == 0);
DCHECK(pending_32_bit_constants_.empty());
DCHECK(pending_64_bit_constants_.empty());
return;
}
@ -3911,8 +3995,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
if ((num_pending_32_bit_constants_ == 0) &&
(num_pending_64_bit_constants_ == 0)) {
if (pending_32_bit_constants_.empty() && pending_64_bit_constants_.empty()) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
@ -3924,9 +4007,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
int jump_instr = require_jump ? kInstrSize : 0;
int size_up_to_marker = jump_instr + kInstrSize;
int estimated_size_after_marker =
num_pending_32_bit_constants_ * kPointerSize;
bool has_int_values = (num_pending_32_bit_constants_ > 0);
bool has_fp_values = (num_pending_64_bit_constants_ > 0);
pending_32_bit_constants_.size() * kPointerSize;
bool has_int_values = !pending_32_bit_constants_.empty();
bool has_fp_values = !pending_64_bit_constants_.empty();
bool require_64_bit_align = false;
if (has_fp_values) {
require_64_bit_align =
@ -3935,7 +4018,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (require_64_bit_align) {
estimated_size_after_marker += kInstrSize;
}
estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
estimated_size_after_marker +=
pending_64_bit_constants_.size() * kDoubleSize;
}
int estimated_size = size_up_to_marker + estimated_size_after_marker;
@ -3954,7 +4038,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// The 64-bit constants are always emitted before the 32-bit constants, so
// we can ignore the effect of the 32-bit constants on estimated_size.
int dist64 = pc_offset() + estimated_size -
num_pending_32_bit_constants_ * kPointerSize -
pending_32_bit_constants_.size() * kPointerSize -
first_const_pool_64_use_;
if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
(!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
@ -3973,7 +4057,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Deduplicate constants.
int size_after_marker = estimated_size_after_marker;
for (int i = 0; i < num_pending_64_bit_constants_; i++) {
for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
DCHECK(!entry.is_merged());
for (int j = 0; j < i; j++) {
@ -3986,7 +4070,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
for (int i = 0; i < num_pending_32_bit_constants_; i++) {
for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
DCHECK(!entry.is_merged());
if (!entry.sharing_ok()) continue;
@ -4031,7 +4115,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
for (int i = 0; i < num_pending_64_bit_constants_; i++) {
for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
Instr instr = instr_at(entry.position());
@ -4060,7 +4144,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// Emit 32-bit constant pool entries.
for (int i = 0; i < num_pending_32_bit_constants_; i++) {
for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
Instr instr = instr_at(entry.position());
@ -4094,8 +4178,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
num_pending_32_bit_constants_ = 0;
num_pending_64_bit_constants_ = 0;
pending_32_bit_constants_.clear();
pending_64_bit_constants_.clear();
first_const_pool_32_use_ = -1;
first_const_pool_64_use_ = -1;
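With the pending-constant buffers now plain std::vectors, the deduplication pass in CheckConstPool above is a straightforward quadratic scan that merges each entry with the first earlier entry holding the same value. A simplified, hypothetical sketch of that shape; V8's ConstantPoolEntry also tracks sharing and access modes, which are omitted here:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Entry {
      int position;
      int64_t value;
      int merged_index;  // index of an earlier, identical entry, or -1
      bool is_merged() const { return merged_index >= 0; }
    };

    // Compare each entry against all earlier ones; merge with the first match.
    int Deduplicate(std::vector<Entry>& entries) {
      int merged = 0;
      for (size_t i = 0; i < entries.size(); i++) {
        for (size_t j = 0; j < i; j++) {
          if (entries[i].value == entries[j].value) {
            entries[i].merged_index = static_cast<int>(j);
            ++merged;
            break;
          }
        }
      }
      return merged;
    }

    int main() {
      std::vector<Entry> pool = {{0, 42, -1}, {8, 7, -1}, {16, 42, -1}};
      std::printf("merged %d entries\n", Deduplicate(pool));  // merged 1 entries
    }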

89
deps/v8/src/arm/assembler-arm.h

@ -57,12 +57,22 @@ namespace internal {
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
#define DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
#define SIMD128_REGISTERS(V) \
V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
@ -112,8 +122,6 @@ struct Register {
Register r = {code};
return r;
}
const char* ToString();
bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
bool is(Register reg) const { return reg_code == reg.reg_code; }
int code() const {
@ -141,9 +149,22 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};
static const bool kSimpleFPAliasing = false;
// Single word VFP register.
struct SwVfpRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
FLOAT_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
kAfterLast,
kCode_no_reg = -1
};
static const int kMaxNumRegisters = Code::kAfterLast;
static const int kSizeInBytes = 4;
bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
@ -154,6 +175,10 @@ struct SwVfpRegister {
DCHECK(is_valid());
return 1 << reg_code;
}
static SwVfpRegister from_code(int code) {
SwVfpRegister r = {code};
return r;
}
void split_code(int* vm, int* m) const {
DCHECK(is_valid());
*m = reg_code & 0x1;
@ -163,9 +188,10 @@ struct SwVfpRegister {
int reg_code;
};
typedef SwVfpRegister FloatRegister;
// Double word VFP register.
struct DoubleRegister {
struct DwVfpRegister {
enum Code {
#define REGISTER_CODE(R) kCode_##R,
DOUBLE_REGISTERS(REGISTER_CODE)
@ -184,10 +210,8 @@ struct DoubleRegister {
// d15: scratch register.
static const int kSizeInBytes = 8;
const char* ToString();
bool IsAllocatable() const;
bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
int code() const {
DCHECK(is_valid());
return reg_code;
@ -197,8 +221,8 @@ struct DoubleRegister {
return 1 << reg_code;
}
static DoubleRegister from_code(int code) {
DoubleRegister r = {code};
static DwVfpRegister from_code(int code) {
DwVfpRegister r = {code};
return r;
}
void split_code(int* vm, int* m) const {
@ -211,7 +235,7 @@ struct DoubleRegister {
};
typedef DoubleRegister DwVfpRegister;
typedef DwVfpRegister DoubleRegister;
// Double word VFP register d0-15.
@ -975,6 +999,14 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
// Load/Store exclusive instructions
void ldrex(Register dst, Register src, Condition cond = al);
void strex(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexb(Register dst, Register src, Condition cond = al);
void strexb(Register src1, Register src2, Register dst, Condition cond = al);
void ldrexh(Register dst, Register src, Condition cond = al);
void strexh(Register src1, Register src2, Register dst, Condition cond = al);
// Preload instructions
void pld(const MemOperand& address);
@ -1225,6 +1257,17 @@ class Assembler : public AssemblerBase {
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
// VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
void vsel(const Condition cond,
const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2);
void vsel(const Condition cond,
const SwVfpRegister dst,
const SwVfpRegister src1,
const SwVfpRegister src2);
void vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
@ -1290,6 +1333,10 @@ class Assembler : public AssemblerBase {
vstm(db_w, sp, src, src, cond);
}
void vpush(SwVfpRegister src, Condition cond = al) {
vstm(db_w, sp, src, src, cond);
}
void vpop(DwVfpRegister dst, Condition cond = al) {
vldm(ia_w, sp, dst, dst, cond);
}
@ -1357,7 +1404,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
// Record the emission of a constant pool.
//
@ -1390,10 +1437,6 @@ class Assembler : public AssemblerBase {
// Emits the address of the code stub's first instruction.
void emit_code_stub_address(Code* stub);
AssemblerPositionsRecorder* positions_recorder() {
return &positions_recorder_;
}
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
@ -1523,10 +1566,10 @@ class Assembler : public AssemblerBase {
// Max pool start (if we need a jump and an alignment).
int start = pc_offset() + kInstrSize + 2 * kPointerSize;
// Check the constant pool hasn't been blocked for too long.
DCHECK((num_pending_32_bit_constants_ == 0) ||
(start + num_pending_64_bit_constants_ * kDoubleSize <
DCHECK(pending_32_bit_constants_.empty() ||
(start + pending_64_bit_constants_.size() * kDoubleSize <
(first_const_pool_32_use_ + kMaxDistToIntPool)));
DCHECK((num_pending_64_bit_constants_ == 0) ||
DCHECK(pending_64_bit_constants_.empty() ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
// Two cases:
@ -1593,14 +1636,8 @@ class Assembler : public AssemblerBase {
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
ConstantPoolEntry pending_32_bit_constants_buffer_[kMinNumPendingConstants];
ConstantPoolEntry pending_64_bit_constants_buffer_[kMinNumPendingConstants];
ConstantPoolEntry* pending_32_bit_constants_;
ConstantPoolEntry* pending_64_bit_constants_;
// Number of pending constant pool entries in the 32 bits buffer.
int num_pending_32_bit_constants_;
// Number of pending constant pool entries in the 64 bits buffer.
int num_pending_64_bit_constants_;
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
ConstantPoolBuilder constant_pool_builder_;
@ -1639,8 +1676,6 @@ class Assembler : public AssemblerBase {
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
AssemblerPositionsRecorder positions_recorder_;
friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
};

795
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

165
deps/v8/src/arm/codegen-arm.cc

@ -6,6 +6,8 @@
#if V8_TARGET_ARCH_ARM
#include <memory>
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"
@ -16,75 +18,12 @@ namespace internal {
#define __ masm.
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
return Simulator::current(isolate)
->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
}
#endif
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
{
DwVfpRegister input = d0;
DwVfpRegister result = d1;
DwVfpRegister double_scratch1 = d2;
DwVfpRegister double_scratch2 = d3;
Register temp1 = r4;
Register temp2 = r5;
Register temp3 = r6;
if (masm.use_eabi_hardfloat()) {
// Input value is in d0 anyway, nothing to do.
} else {
__ vmov(input, r0, r1);
}
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(
&masm, input, result, double_scratch1, double_scratch2,
temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
if (masm.use_eabi_hardfloat()) {
__ vmov(d0, result);
} else {
__ vmov(r0, r1, result);
}
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
@ -242,7 +181,6 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
@ -450,6 +388,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
__ sub(array, array, Operand(kHeapObjectTag));
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
@ -594,11 +533,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, not tagged as heap object
// array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ str(scratch, MemOperand(array, HeapObject::kMapOffset));
__ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
__ sub(array, array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
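The GenerateDoubleToObject change above keeps the Allocate result tagged and switches the stores to FieldMemOperand, which folds the -kHeapObjectTag adjustment into the load/store offset instead of untagging the base register first. A tiny sketch of the address arithmetic, with a hypothetical object address and kMapOffset standing in for HeapObject::kMapOffset:

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kHeapObjectTag = 1;  // low bit set on tagged heap pointers

    // Field access on a tagged pointer: subtract the tag as part of the offset,
    // which is what FieldMemOperand does.
    intptr_t FieldAddress(intptr_t tagged_base, int offset) {
      return tagged_base + offset - kHeapObjectTag;
    }

    int main() {
      intptr_t raw = 0x1000;                  // untagged object start (hypothetical)
      intptr_t tagged = raw + kHeapObjectTag; // tagged pointer to the same object
      int kMapOffset = 0;                     // stand-in for HeapObject::kMapOffset
      std::printf("%s\n", FieldAddress(tagged, kMapOffset) == raw + kMapOffset
                              ? "equal" : "different");  // prints "equal"
    }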
@ -791,94 +732,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3) {
DCHECK(!input.is(result));
DCHECK(!input.is(double_scratch1));
DCHECK(!input.is(double_scratch2));
DCHECK(!result.is(double_scratch1));
DCHECK(!result.is(double_scratch2));
DCHECK(!double_scratch1.is(double_scratch2));
DCHECK(!temp1.is(temp2));
DCHECK(!temp1.is(temp3));
DCHECK(!temp2.is(temp3));
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label zero, infinity, done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
__ VFPCompareAndSetFlags(double_scratch1, input);
__ b(ge, &zero);
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
__ b(ge, &infinity);
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
__ VmovLow(temp2, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
__ vmul(double_scratch2, double_scratch1, double_scratch1);
__ vmul(result, result, double_scratch2);
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
// Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
DCHECK(*reinterpret_cast<double*>
(ExternalReference::math_exp_constants(8).address()) == 1);
__ vmov(double_scratch2, 1);
__ vadd(result, result, double_scratch2);
__ mov(temp1, Operand(temp2, LSR, 11));
__ Ubfx(temp2, temp2, 0, 11);
__ add(temp1, temp1, Operand(0x3ff));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ add(temp3, temp3, Operand(temp2, LSL, 3));
__ ldm(ia, temp3, temp2.bit() | temp3.bit());
// The first word loaded is in the lower numbered register.
if (temp2.code() < temp3.code()) {
__ orr(temp1, temp3, Operand(temp1, LSL, 20));
__ vmov(double_scratch1, temp2, temp1);
} else {
__ orr(temp1, temp2, Operand(temp1, LSL, 20));
__ vmov(double_scratch1, temp3, temp1);
}
__ vmul(result, result, double_scratch1);
__ b(&done);
__ bind(&zero);
__ vmov(result, kDoubleRegZero);
__ b(&done);
__ bind(&infinity);
__ vldr(result, ExpConstant(2, temp3));
__ bind(&done);
}
#undef __
#ifdef DEBUG
@ -893,7 +746,7 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// to avoid overloading the stack in stress conditions.
// DONT_FLUSH is used because the CodeAgingHelper is initialized early in
// the process, before ARM simulator ICache is setup.
base::SmartPointer<CodePatcher> patcher(
std::unique_ptr<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
young_sequence_.length() / Assembler::kInstrSize,
CodePatcher::DONT_FLUSH));

17
deps/v8/src/arm/codegen-arm.h

@ -5,7 +5,6 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@ -28,22 +27,6 @@ class StringCharLoadGenerator : public AllStatic {
};
class MathExpGenerator : public AllStatic {
public:
// Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm,
DwVfpRegister input,
DwVfpRegister result,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} // namespace internal
} // namespace v8

61
deps/v8/src/arm/deoptimizer-arm.cc

@ -66,15 +66,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes =
MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
deopt_entry,
RelocInfo::NONE32);
int call_size_in_bytes = MacroAssembler::CallDeoptimizerSize();
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
DCHECK(call_size_in_bytes <= patch_size());
CodePatcher patcher(isolate, call_address, call_size_in_words);
patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
patcher.masm()->CallDeoptimizer(deopt_entry);
DCHECK(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
DCHECK(call_address + patch_size() <= code->instruction_end());
@ -189,8 +186,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
int dst_offset = code * kDoubleSize + double_regs_offset;
@ -307,15 +303,50 @@ void Deoptimizer::TableEntryGenerator::Generate() {
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ mov(ip, Operand(i));
__ b(&done);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
// We need to be able to generate immediates up to kMaxNumberOfEntries. On
// ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
// need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(masm(), ARMv7);
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movw(ip, i);
__ b(&done);
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
__ bind(&done);
} else {
// We want to keep table_entry_size_ == 8 (since this is the common case),
// but we need two instructions to load most immediates over 0xff. To handle
// this, we set the low byte in the main table, and then set the high byte
// in a separate table if necessary.
Label high_fixes[256];
int high_fix_max = (count() - 1) >> 8;
DCHECK_GT(arraysize(high_fixes), high_fix_max);
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ mov(ip, Operand(i & 0xff)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
// Generate the secondary table, to set the high byte.
for (int high = 1; high <= high_fix_max; high++) {
__ bind(&high_fixes[high]);
__ orr(ip, ip, Operand(high << 8));
// If this isn't the last entry, emit a branch to the end of the table.
// The last entry can just fall through.
if (high < high_fix_max) __ b(&high_fixes[0]);
}
// Bind high_fixes[0] last, for indices like 0x00**. This case requires no
// fix-up, so for (common) small tables we can jump here, then just fall
// through with no additional branch.
__ bind(&high_fixes[0]);
}
__ bind(&done);
__ push(ip);
}
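On ARMv6 each 8-byte table entry can only materialise the low byte of the deoptimizer index, so it branches into a shared fix-up table that ors in the high byte. A small check that the two-step encoding reconstructs every index; kMaxNumberOfEntries here is only a stand-in bound, not the real constant from deoptimizer.h:

    #include <cassert>
    #include <cstdio>

    constexpr int kMaxNumberOfEntries = 16384;  // hypothetical bound

    // Simulates what one ARMv6 table entry plus its high-byte fix-up leave in ip.
    int ReconstructIndex(int i) {
      int ip = i & 0xff;                    // main table: mov ip, #(i & 0xff)
      int bucket = i >> 8;                  // main table: b high_fixes[i >> 8]
      if (bucket != 0) ip |= bucket << 8;   // fix-up: orr ip, ip, #(bucket << 8)
      return ip;
    }

    int main() {
      for (int i = 0; i < kMaxNumberOfEntries; i++)
        assert(ReconstructIndex(i) == i);
      std::printf("all %d indices round-trip\n", kMaxNumberOfEntries);
    }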

106
deps/v8/src/arm/disasm-arm.cc

@ -40,6 +40,7 @@
namespace v8 {
namespace internal {
const auto GetRegConfig = RegisterConfiguration::Crankshaft;
//------------------------------------------------------------------------------
@ -755,7 +756,45 @@ void Decoder::DecodeType01(Instruction* instr) {
Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
}
} else {
Unknown(instr); // not used by V8
if (instr->Bits(24, 23) == 3) {
if (instr->Bit(20) == 1) {
// ldrex
switch (instr->Bits(22, 21)) {
case 0:
Format(instr, "ldrex'cond 'rt, ['rn]");
break;
case 2:
Format(instr, "ldrexb'cond 'rt, ['rn]");
break;
case 3:
Format(instr, "ldrexh'cond 'rt, ['rn]");
break;
default:
UNREACHABLE();
break;
}
} else {
// strex
// The instruction is documented as strex rd, rt, [rn], but the
// "rt" register is using the rm bits.
switch (instr->Bits(22, 21)) {
case 0:
Format(instr, "strex'cond 'rd, 'rm, ['rn]");
break;
case 2:
Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
break;
case 3:
Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
break;
default:
UNREACHABLE();
break;
}
}
} else {
Unknown(instr); // not used by V8
}
}
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
// ldrd, strd
@ -1325,16 +1364,10 @@ int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
if (instr->SvcValue() >= kStopCode) {
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"\n %p %08x stop message: %s",
reinterpret_cast<void*>(instr
+ Instruction::kInstrSize),
*reinterpret_cast<uint32_t*>(instr
+ Instruction::kInstrSize),
*reinterpret_cast<char**>(instr
+ Instruction::kInstrSize));
out_buffer_pos_ += SNPrintF(
out_buffer_ + out_buffer_pos_, "\n %p %08x",
reinterpret_cast<void*>(instr + Instruction::kInstrSize),
*reinterpret_cast<uint32_t*>(instr + Instruction::kInstrSize));
// We have decoded 2 * Instruction::kInstrSize bytes.
return 2 * Instruction::kInstrSize;
} else {
@ -1869,6 +1902,48 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Unknown(instr);
}
break;
case 0x1C:
if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
// VSEL* (floating-point)
bool dp_operation = (instr->SzValue() == 1);
switch (instr->Bits(21, 20)) {
case 0x0:
if (dp_operation) {
Format(instr, "vseleq.f64 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vseleq.f32 'Sd, 'Sn, 'Sm");
}
break;
case 0x1:
if (dp_operation) {
Format(instr, "vselvs.f64 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vselvs.f32 'Sd, 'Sn, 'Sm");
}
break;
case 0x2:
if (dp_operation) {
Format(instr, "vselge.f64 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vselge.f32 'Sd, 'Sn, 'Sm");
}
break;
case 0x3:
if (dp_operation) {
Format(instr, "vselgt.f64 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vselgt.f32 'Sd, 'Sn, 'Sm");
}
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
} else {
Unknown(instr);
}
break;
default:
Unknown(instr);
break;
@ -1968,7 +2043,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.start();
}
@ -1979,7 +2054,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
return v8::internal::Register::from_code(reg).ToString();
return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
}
@ -2031,9 +2106,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(
f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
*reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}

64
deps/v8/src/arm/eh-frame-arm.cc

@ -0,0 +1,64 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/eh-frame.h"
namespace v8 {
namespace internal {
static const int kR0DwarfCode = 0;
static const int kFpDwarfCode = 11;
static const int kSpDwarfCode = 13;
static const int kLrDwarfCode = 14;
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -4;
void EhFrameWriter::WriteReturnAddressRegisterCode() {
WriteULeb128(kLrDwarfCode);
}
void EhFrameWriter::WriteInitialStateInCie() {
SetBaseAddressRegisterAndOffset(fp, 0);
RecordRegisterNotModified(lr);
}
// static
int EhFrameWriter::RegisterToDwarfCode(Register name) {
switch (name.code()) {
case Register::kCode_fp:
return kFpDwarfCode;
case Register::kCode_sp:
return kSpDwarfCode;
case Register::kCode_lr:
return kLrDwarfCode;
case Register::kCode_r0:
return kR0DwarfCode;
default:
UNIMPLEMENTED();
return -1;
}
}
#ifdef ENABLE_DISASSEMBLER
// static
const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
switch (code) {
case kFpDwarfCode:
return "fp";
case kSpDwarfCode:
return "sp";
case kLrDwarfCode:
return "lr";
default:
UNIMPLEMENTED();
return nullptr;
}
}
#endif
} // namespace internal
} // namespace v8

110
deps/v8/src/arm/interface-descriptors-arm.cc

@ -13,6 +13,19 @@ namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
CallInterfaceDescriptorData* data, int register_parameter_count) {
const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
CHECK_LE(static_cast<size_t>(register_parameter_count),
arraysize(default_stub_registers));
data->InitializePlatformSpecific(register_parameter_count,
default_stub_registers);
}
const Register FastNewFunctionContextDescriptor::FunctionRegister() {
return r1;
}
const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r0; }
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
@ -25,13 +38,9 @@ const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
const Register StoreDescriptor::SlotRegister() { return r4; }
const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
const Register VectorStoreTransitionDescriptor::SlotRegister() { return r4; }
const Register VectorStoreTransitionDescriptor::VectorRegister() { return r3; }
@ -41,23 +50,15 @@ const Register VectorStoreTransitionDescriptor::MapRegister() { return r5; }
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
const Register InstanceOfDescriptor::LeftRegister() { return r1; }
const Register InstanceOfDescriptor::RightRegister() { return r0; }
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
const Register ApiGetterDescriptor::function_address() { return r2; }
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }
@ -77,13 +78,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewContextDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3};
@ -248,50 +242,35 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateInNewSpaceDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void InternalArrayConstructorConstantArgCountDescriptor::
InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r0 -- number of arguments
// r1 -- constructor function
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {r1, r0};
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastArrayPushDescriptor::InitializePlatformSpecific(
void VarArgFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (arg count)
Register registers[] = {r0};
@ -318,6 +297,22 @@ void BinaryOpWithAllocationSiteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r1 -- lhs
// r0 -- rhs
// r4 -- slot id
// r3 -- vector
Register registers[] = {r1, r0, r4, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CountOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void StringAddDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -398,9 +393,8 @@ void ApiCallbackDescriptorBase::InitializePlatformSpecific(
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
kInterpreterDispatchTableRegister};
kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -435,6 +429,16 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // the value to pass to the generator
r1, // the JSGeneratorObject to resume
r2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace internal
} // namespace v8

433
deps/v8/src/arm/macro-assembler-arm.cc

@ -89,17 +89,6 @@ int MacroAssembler::CallStubSize(
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
Address target,
RelocInfo::Mode rmode,
Condition cond) {
Instr mov_instr = cond | MOV | LeaveCC;
Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
return kInstrSize +
mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond,
@ -131,12 +120,6 @@ void MacroAssembler::Call(Address target,
// blx ip
// @ return address
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
// positions when pc is the target, since this is not the case here
// we have to do it explicitly.
positions_recorder()->WriteRecordedPositions();
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
@ -173,6 +156,40 @@ void MacroAssembler::Call(Handle<Code> code,
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
void MacroAssembler::CallDeoptimizer(Address target) {
BlockConstPoolScope block_const_pool(this);
uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
// We use blx, like a call, but it does not return here. The link register is
// used by the deoptimizer to work out what called it.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movw(ip, target_raw & 0xffff);
movt(ip, (target_raw >> 16) & 0xffff);
blx(ip);
} else {
// We need to load a literal, but we can't use the usual constant pool
// because we call this from a patcher, and cannot afford the guard
// instruction and other administrative overhead.
ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
blx(ip);
dd(target_raw);
}
}
int MacroAssembler::CallDeoptimizerSize() {
// ARMv7+:
// movw ip, ...
// movt ip, ...
// blx ip @ This never returns.
//
// ARMv6:
// ldr ip, =address
// blx ip @ This never returns.
// .word address
return 3 * kInstrSize;
}
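
For reference, the ARMv6 fallback in CallDeoptimizer reaches its inline literal with a zero displacement, and both sequences come out at the three words that CallDeoptimizerSize() reports. A minimal standalone sketch of that arithmetic, assuming the usual ARM values kInstrSize = 4 and kPcLoadDelta = 8 (reading pc yields the current instruction address plus 8); the constants here are illustrative, not taken from this diff:

// Illustrative values mirroring the ARM port's conventions.
constexpr int kInstrSize = 4;    // one ARM instruction is four bytes
constexpr int kPcLoadDelta = 8;  // pc reads as current instruction + 8

// ARMv6 branch of CallDeoptimizer:
//   +0: ldr ip, [pc, #disp]
//   +4: blx ip
//   +8: .word target          (emitted by dd(target_raw))
// During the ldr, pc reads as +0 + kPcLoadDelta = +8, so the displacement
// needed to hit the literal is (2 * kInstrSize) - kPcLoadDelta = 0.
static_assert((2 * kInstrSize) - kPcLoadDelta == 0,
              "literal sits exactly where pc points");

// Either sequence (movw/movt/blx or ldr/blx/.word) occupies three words.
static_assert(3 * kInstrSize == 12, "CallDeoptimizerSize covers both forms");
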
void MacroAssembler::Ret(Condition cond) {
bx(lr, cond);
@ -223,19 +240,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
void MacroAssembler::Move(Register dst, Handle<Object> value) {
AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
mov(dst, Operand(value));
} else {
DCHECK(value->IsHeapObject());
if (isolate()->heap()->InNewSpace(*value)) {
Handle<Cell> cell = isolate()->factory()->NewCell(value);
mov(dst, Operand(cell));
ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
} else {
mov(dst, Operand(value));
}
}
mov(dst, Operand(value));
}
@ -245,6 +250,11 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
}
void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
}
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
if (!dst.is(src)) {
@ -252,11 +262,10 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
}
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) {
if (CpuFeatures::IsSupported(MLS)) {
CpuFeatureScope scope(this, MLS);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
mls(dst, src1, src2, srcA, cond);
} else {
DCHECK(!srcA.is(ip));
@ -355,37 +364,6 @@ void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
}
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
DCHECK(!dst.is(pc) && !src.rm().is(pc));
DCHECK((satpos >= 0) && (satpos <= 31));
// These asserts are required to ensure compatibility with the ARMv7
// implementation.
DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
DCHECK(src.rs().is(no_reg));
Label done;
int satval = (1 << satpos) - 1;
if (cond != al) {
b(NegateCondition(cond), &done); // Skip saturate if !condition.
}
if (!(src.is_reg() && dst.is(src.rm()))) {
mov(dst, src);
}
tst(dst, Operand(~satval));
b(eq, &done);
mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
bind(&done);
} else {
usat(dst, satpos, src, cond);
}
}
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
@ -872,8 +850,7 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// Number of d-regs not known at snapshot time.
DCHECK(!serializer_enabled());
// General purpose registers are pushed last on the stack.
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
@ -889,10 +866,8 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2,
// below doesn't support it yet.
DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
(dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
CpuFeatureScope scope(this, ARMv7);
// Generate two ldr instructions if ldrd is not applicable.
if ((dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
ldrd(dst1, dst2, src, cond);
} else {
if ((src.am() == Offset) || (src.am() == NegOffset)) {
@ -930,10 +905,8 @@ void MacroAssembler::Strd(Register src1, Register src2,
// below doesn't support it yet.
DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
(src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
CpuFeatureScope scope(this, ARMv7);
// Generate two str instructions if strd is not applicable.
if ((src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
@ -950,30 +923,12 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
}
void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
// If needed, restore wanted bits of FPSCR.
Label fpscr_done;
vmrs(scratch);
if (emit_debug_code()) {
Label rounding_mode_correct;
tst(scratch, Operand(kVFPRoundingModeMask));
b(eq, &rounding_mode_correct);
// Don't call Assert here, since Runtime_Abort could re-enter here.
stop("Default rounding mode not set");
bind(&rounding_mode_correct);
}
tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
b(ne, &fpscr_done);
orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
vmsr(scratch);
bind(&fpscr_done);
}
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Subtracting 0.0 preserves all inputs except for signalling NaNs, which
// become quiet NaNs. We use vsub rather than vadd because vsub preserves -0.0
// inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
vsub(dst, src, kDoubleRegZero, cond);
}
@ -1049,13 +1004,11 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value_rep(imm);
int64_t imm_bits = bit_cast<int64_t>(imm);
// Handle special values first.
if (value_rep == zero) {
if (imm_bits == bit_cast<int64_t>(0.0)) {
vmov(dst, kDoubleRegZero);
} else if (value_rep == minus_zero) {
} else if (imm_bits == bit_cast<int64_t>(-0.0)) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
@ -1290,9 +1243,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
ldr(vector,
FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
}
@ -1332,13 +1284,29 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
return frame_ends;
}
void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
Register argc) {
Push(lr, fp, context, target);
add(fp, sp, Operand(2 * kPointerSize));
Push(argc);
}
void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
Register argc) {
Pop(argc);
Pop(lr, fp, context, target);
}
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Set up the frame structure on the stack.
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
mov(ip, Operand(Smi::FromInt(StackFrame::EXIT)));
mov(ip, Operand(Smi::FromInt(frame_type)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@ -1606,12 +1574,13 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference step_in_enabled =
ExternalReference::debug_step_in_enabled_address(isolate());
mov(r4, Operand(step_in_enabled));
ldrb(r4, MemOperand(r4));
cmp(r4, Operand(0));
b(eq, &skip_flooding);
ExternalReference last_step_action =
ExternalReference::debug_last_step_action_address(isolate());
STATIC_ASSERT(StepFrame > StepIn);
mov(r4, Operand(last_step_action));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(StepIn));
b(lt, &skip_flooding);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@ -2003,6 +1972,7 @@ void MacroAssembler::Allocate(int object_size,
Label* gc_required,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -2090,26 +2060,29 @@ void MacroAssembler::Allocate(int object_size,
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
add(result_end, source, bits_operand, SetCC, cond);
add(result_end, source, bits_operand, LeaveCC, cond);
source = result_end;
cond = cc;
}
}
b(cs, gc_required);
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
str(result_end, MemOperand(top_address));
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
add(result, result, Operand(kHeapObjectTag));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
}
// Tag object.
add(result, result, Operand(kHeapObjectTag));
}
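
The trailing add of kHeapObjectTag is the usual pointer-tagging step once the bump succeeds. A small standalone sketch of what tagging and FieldMemOperand-style untagging do to an address, assuming the conventional tag value of 1; the helper names are illustrative, not V8 API:

#include <cassert>
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;  // assumed: heap pointers carry a set low bit

// What `add(result, result, Operand(kHeapObjectTag))` does to the raw
// allocation address.
intptr_t TagHeapObject(intptr_t raw) { return raw + kHeapObjectTag; }

// FieldMemOperand-style access compensates for the tag when forming a
// field address from a tagged pointer.
intptr_t FieldAddress(intptr_t tagged, int offset) {
  return tagged + offset - kHeapObjectTag;
}

int main() {
  intptr_t raw = 0x1000;  // hypothetical Allocate() result
  intptr_t tagged = TagHeapObject(raw);
  assert(FieldAddress(tagged, 0) == raw);  // e.g. the map slot at offset 0
  return 0;
}
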
void MacroAssembler::Allocate(Register object_size, Register result,
Register result_end, Register scratch,
Label* gc_required, AllocationFlags flags) {
DCHECK((flags & ALLOCATION_FOLDED) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@ -2185,7 +2158,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
} else {
add(result_end, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
cmp(result_end, Operand(alloc_limit));
b(hi, gc_required);
@ -2194,14 +2167,122 @@ void MacroAssembler::Allocate(Register object_size, Register result,
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
str(result_end, MemOperand(top_address));
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
}
// Tag object.
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
Register result_end, Register scratch,
AllocationFlags flags) {
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
DCHECK(!AreAliased(object_size, result, scratch, ip));
DCHECK(!AreAliased(result_end, result, scratch, ip));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {
add(result, result, Operand(kHeapObjectTag));
Register top_address = scratch;
mov(top_address, Operand(allocation_top));
ldr(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
add(result_end, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
add(result_end, result, Operand(object_size), SetCC);
}
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
tst(result_end, Operand(kObjectAlignmentMask));
Check(eq, kUnalignedAllocationInNewSpace);
}
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
Register scratch1, Register scratch2,
AllocationFlags flags) {
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!AreAliased(result, scratch1, scratch2, ip));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
DCHECK_EQ(0, object_size & kObjectAlignmentMask);
ExternalReference allocation_top =
AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Set up allocation top address register.
Register top_address = scratch1;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
ldr(result, MemOperand(top_address));
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(result_end, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
b(eq, &aligned);
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
str(result_end, MemOperand(result, kDoubleSize / 2, PostIndex));
bind(&aligned);
}
// Calculate new top using result. Object size may be in words so a shift is
// required to get the number of bytes. We must preserve the ip register at
// this point, so we cannot just use add().
DCHECK(object_size > 0);
Register source = result;
Condition cond = al;
int shift = 0;
while (object_size != 0) {
if (((object_size >> shift) & 0x03) == 0) {
shift += 2;
} else {
int bits = object_size & (0xff << shift);
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
DCHECK(bits_operand.instructions_required(this) == 1);
add(result_end, source, bits_operand, LeaveCC, cond);
source = result_end;
cond = cc;
}
}
// The top pointer is not updated for allocation folding dominators.
str(result_end, MemOperand(top_address));
add(result, result, Operand(kHeapObjectTag));
}
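
FastAllocate above only bumps the allocation top and never compares against the limit, which is safe only because a dominating checked allocation already reserved space for the whole folded group. A minimal standalone sketch of that bump-pointer split; the Space struct and function names are illustrative, not V8 API:

#include <cstddef>
#include <cstdint>

// Illustrative top/limit pair, like a new-space page.
struct Space {
  uintptr_t top;
  uintptr_t limit;
};

// Checked allocation: fail when the limit would be exceeded, the analogue of
// Allocate() branching to gc_required.
uintptr_t CheckedAllocate(Space* space, size_t size) {
  uintptr_t result = space->top;
  if (size > space->limit - result) return 0;  // caller must enter the GC
  space->top = result + size;
  return result;
}

// Fast allocation: no limit check at all; only valid when a dominating
// checked allocation already proved the folded group fits below the limit.
uintptr_t FastAllocateSketch(Space* space, size_t size) {
  uintptr_t result = space->top;
  space->top = result + size;
  return result;
}
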
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
@ -2218,12 +2299,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
Allocate(scratch1,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
@ -2247,12 +2324,8 @@ void MacroAssembler::AllocateOneByteString(Register result, Register length,
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
scratch3,
gc_required,
TAG_OBJECT);
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@ -2266,7 +2339,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@ -2280,12 +2353,8 @@ void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
TAG_OBJECT);
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
@ -2298,7 +2367,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
@ -2314,7 +2383,7 @@ void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
@ -2414,12 +2483,6 @@ void MacroAssembler::StoreNumberToDoubleElements(
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Force a canonical NaN.
if (emit_debug_code()) {
vmrs(ip);
tst(ip, Operand(kVFPDefaultNaNModeControlBit));
Assert(ne, kDefaultNaNModeNotSet);
}
VFPCanonicalizeNaN(double_scratch);
b(&store);
@ -2803,18 +2866,18 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
#if defined(__thumb__)
// Thumb mode builtin.
DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
#endif
mov(r1, Operand(builtin));
CEntryStub stub(isolate(), 1);
CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
builtin_exit_frame);
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@ -2902,17 +2965,19 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
mov(r0, Operand(Smi::FromInt(reason)));
push(r0);
// Check if Abort() has already been initialized.
DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
Move(r1, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kAbort);
Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
} else {
CallRuntime(Runtime::kAbort);
Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
}
// will not return here
if (is_const_pool_blocked()) {
@ -3129,6 +3194,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAGeneratorObject);
push(object);
CompareObjectType(object, object, object, JS_GENERATOR_OBJECT_TYPE);
pop(object);
Check(eq, kOperandIsNotAGeneratorObject);
}
}
void MacroAssembler::AssertReceiver(Register object) {
if (emit_debug_code()) {
@ -3225,12 +3301,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch2,
Register heap_number_map,
Label* gc_required,
TaggingMode tagging_mode,
MutableMode mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
NO_ALLOCATION_FLAGS);
Heap::RootListIndex map_index = mode == MUTABLE
? Heap::kMutableHeapNumberMapRootIndex
@ -3238,11 +3313,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
if (tagging_mode == TAG_RESULT) {
str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
}
str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@ -3267,7 +3338,8 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
DCHECK(!result.is(value));
// Allocate JSValue in new space.
Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@ -3306,17 +3378,7 @@ void MacroAssembler::CopyBytes(Register src,
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
} else {
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
}
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
sub(length, length, Operand(kPointerSize));
b(&word_loop);
@ -3662,7 +3724,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
Usat(output_reg, 8, Operand(input_reg));
usat(output_reg, 8, Operand(input_reg));
}
@ -3770,7 +3832,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Label* no_memento_found) {
Label map_check;
Label top_check;
ExternalReference new_space_allocation_top =
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@ -3780,7 +3842,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
eor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
mov(ip, Operand(new_space_allocation_top_adr));
ldr(ip, MemOperand(ip));
eor(scratch_reg, scratch_reg, Operand(ip));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(eq, &top_check);
// The object is on a different page than allocation top. Bail out if the
@ -3796,7 +3860,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// we are below top.
bind(&top_check);
add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
cmp(scratch_reg, Operand(new_space_allocation_top));
mov(ip, Operand(new_space_allocation_top_adr));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
b(gt, no_memento_found);
// Memento map check.
bind(&map_check);
@ -3818,8 +3884,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
@ -3917,6 +3982,10 @@ CodePatcher::~CodePatcher() {
Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that we don't have any pending constant pools.
DCHECK(masm_.pending_32_bit_constants_.empty());
DCHECK(masm_.pending_64_bit_constants_.empty());
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);

62
deps/v8/src/arm/macro-assembler-arm.h

@ -19,8 +19,8 @@ const Register kReturnRegister1 = {Register::kCode_r1};
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
@ -101,10 +101,6 @@ class MacroAssembler: public Assembler {
int CallStubSize(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
static int CallSizeNotPredictableCodeSize(Isolate* isolate,
Address target,
RelocInfo::Mode rmode,
Condition cond = al);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
@ -114,17 +110,19 @@ class MacroAssembler: public Assembler {
void Call(Address target, RelocInfo::Mode rmode,
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Ret(Condition cond = al);
// Used for patching in calls to the deoptimizer.
void CallDeoptimizer(Address target);
static int CallDeoptimizerSize();
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
@ -157,8 +155,6 @@ class MacroAssembler: public Assembler {
int width,
Condition cond = al);
void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
void Usat(Register dst, int satpos, const Operand& src,
Condition cond = al);
void Call(Label* target);
void Push(Register src) { push(src); }
@ -174,6 +170,7 @@ class MacroAssembler: public Assembler {
mov(dst, src, sbit, cond);
}
}
void Move(SwVfpRegister dst, SwVfpRegister src);
void Move(DwVfpRegister dst, DwVfpRegister src);
void Load(Register dst, const MemOperand& src, Representation r);
@ -489,15 +486,6 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
// Ensure that FPSCR contains values needed by JavaScript.
// We need the NaNModeControlBit to be sure that operations like
// vadd and vsub generate the Canonical NaN (if a NaN must be generated).
// In VFP3 it will be always the Canonical NaN.
// In VFP2 it will be either the Canonical NaN or the negative version
// of the Canonical NaN. It doesn't matter if we have two values. The aim
// is to be sure to never generate the hole NaN.
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value else, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister dst,
const DwVfpRegister src,
@ -602,7 +590,8 @@ class MacroAssembler: public Assembler {
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0);
void EnterExitFrame(bool save_doubles, int stack_space = 0,
StackFrame::Type frame_type = StackFrame::EXIT);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
@ -792,6 +781,15 @@ class MacroAssembler: public Assembler {
void Allocate(Register object_size, Register result, Register result_end,
Register scratch, Label* gc_required, AllocationFlags flags);
// FastAllocate is right now only used for folded allocations. It just
// increments the top pointer without checking against limit. This can only
// be done if it was proved earlier that the allocation will succeed.
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
@ -826,7 +824,6 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register heap_number_map,
Label* gc_required,
TaggingMode tagging_mode = TAG_RESULT,
MutableMode mode = IMMUTABLE);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
@ -1170,7 +1167,8 @@ class MacroAssembler: public Assembler {
void MovFromFloatResult(DwVfpRegister dst);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@ -1326,6 +1324,10 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
// Abort execution if argument is not a JSGeneratorObject,
// enabled via --debug-code.
void AssertGeneratorObject(Register object);
// Abort execution if argument is not a JSReceiver, enabled via --debug-code.
void AssertReceiver(Register object);
@ -1436,6 +1438,9 @@ class MacroAssembler: public Assembler {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
void EnterBuiltinFrame(Register context, Register target, Register argc);
void LeaveBuiltinFrame(Register context, Register target, Register argc);
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
@ -1568,16 +1573,7 @@ inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} // namespace internal
} // namespace v8

300
deps/v8/src/arm/simulator-arm.cc

@ -33,7 +33,6 @@ namespace internal {
class ArmDebugger {
public:
explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
~ArmDebugger();
void Stop(Instruction* instr);
void Debug();
@ -62,77 +61,18 @@ class ArmDebugger {
void RedoBreakpoints();
};
ArmDebugger::~ArmDebugger() {
}
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
static void InitializeCoverage() {
char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
if (file_name != NULL) {
coverage_log = fopen(file_name, "aw+");
}
}
void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
char** msg_address =
reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
char* msg = *msg_address;
DCHECK(msg != NULL);
// Update this stop description.
if (isWatchedStop(code) && !watched_stops_[code].desc) {
watched_stops_[code].desc = msg;
}
if (strlen(msg) > 0) {
if (coverage_log != NULL) {
fprintf(coverage_log, "%s\n", msg);
fflush(coverage_log);
}
// Overwrite the instruction and address with nops.
instr->SetInstructionBits(kNopInstr);
reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
}
sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
}
#else // ndef GENERATED_CODE_COVERAGE
static void InitializeCoverage() {
}
void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
char* msg = *reinterpret_cast<char**>(sim_->get_pc()
+ Instruction::kInstrSize);
// Update this stop description.
if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
sim_->watched_stops_[code].desc = msg;
}
// Print the stop message and code if it is not the default code.
if (code != kMaxStopCode) {
PrintF("Simulator hit stop %u: %s\n", code, msg);
PrintF("Simulator hit stop %u\n", code);
} else {
PrintF("Simulator hit %s\n", msg);
PrintF("Simulator hit\n");
}
sim_->set_pc(sim_->get_pc() + 2 * Instruction::kInstrSize);
Debug();
}
#endif
int32_t ArmDebugger::GetRegisterValue(int regnum) {
if (regnum == kPCRegister) {
@ -142,7 +82,6 @@ int32_t ArmDebugger::GetRegisterValue(int regnum) {
}
}
double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
return sim_->get_double_from_register_pair(regnum);
}
@ -299,8 +238,11 @@ void ArmDebugger::Debug() {
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF("%3s: 0x%08x %10d", Register::from_code(i).ToString(),
value, value);
PrintF(
"%3s: 0x%08x %10d",
RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
i),
value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
(i % 2) == 0) {
@ -387,7 +329,7 @@ void ArmDebugger::Debug() {
end = cur + words;
while (cur < end) {
PrintF(" 0x%08x: 0x%08x %10d",
PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
@ -449,8 +391,8 @@ void ArmDebugger::Debug() {
while (cur < end) {
prev = cur;
cur += dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08x %s\n",
reinterpret_cast<intptr_t>(prev), buffer.start());
PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
buffer.start());
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@ -633,9 +575,7 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
@ -656,10 +596,8 @@ void Simulator::FlushICache(v8::internal::HashMap* i_cache,
}
}
CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
v8::internal::HashMap::Entry* entry =
i_cache->LookupOrInsert(page, ICacheHash(page));
CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@ -669,9 +607,7 @@ CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
intptr_t start,
int size) {
void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
@ -683,9 +619,7 @@ void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction* instr) {
void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@ -718,7 +652,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
i_cache_ = new v8::internal::HashMap(&ICacheMatch);
i_cache_ = new base::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
@ -769,7 +703,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
InitializeCoverage();
last_debugger_input_ = NULL;
}
@ -850,10 +783,10 @@ class Redirection {
// static
void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
@ -1112,98 +1045,51 @@ void Simulator::TrashCallerSaveRegisters() {
}
// Some Operating Systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
// The following statements below describes the behavior of the ARM CPUs
// that don't support unaligned access.
// Some ARM platforms raise an interrupt on detecting unaligned access.
// On others it does a funky rotation thing. For now we
// simply disallow unaligned reads. Note that simulator runs have the runtime
// system running directly on the host system and only generated code is
// executed in the simulator. Since the host is typically IA32 we will not
// get the correct ARM-like behaviour on unaligned accesses for those ARM
// targets that don't support unaligned loads and stores.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
} else {
PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
} else {
PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
}
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
} else {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08"
V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
} else {
PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
} else {
PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08"
V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
}
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if (FLAG_enable_unaligned_accesses || (addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
} else {
PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr,
reinterpret_cast<intptr_t>(instr));
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
}
@ -1232,26 +1118,19 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
int32_t* Simulator::ReadDW(int32_t addr) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
} else {
PrintF("Unaligned read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
if (FLAG_enable_unaligned_accesses || (addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
} else {
PrintF("Unaligned write at 0x%08x\n", addr);
UNIMPLEMENTED();
}
// All supported ARM targets allow unaligned accesses, so we don't need to
// check the alignment here.
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
}
@ -1271,7 +1150,7 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED();
}
@ -1808,15 +1687,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Call to host function at %p with args %f, %f",
FUNCTION_ADDR(generic_target), dval0, dval1);
static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
FUNCTION_ADDR(generic_target), dval0);
static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %d",
FUNCTION_ADDR(generic_target), dval0, ival);
static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
ival);
break;
default:
UNREACHABLE();
@ -1942,7 +1823,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host triple returning runtime function %p "
"args %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@ -1953,7 +1835,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// pass it to the target function.
ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
static_cast<void*>(result.y), static_cast<void*>(result.z));
}
// Return is passed back in address pointed to by hidden first argument.
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@ -1969,13 +1852,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host function at %p "
"args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3,
arg4,
arg5);
static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
arg4, arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@ -3733,11 +3611,14 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
int32_t address = get_register(rn) + 4 * offset;
// Load and store address for singles must be at least four-byte
// aligned.
DCHECK((address % 4) == 0);
if (instr->HasL()) {
// Load double from memory: vldr.
// Load single from memory: vldr.
set_s_register_from_sinteger(vd, ReadW(address, instr));
} else {
// Store double to memory: vstr.
// Store single to memory: vstr.
WriteW(address, get_sinteger_from_s_register(vd), instr);
}
break;
@ -3786,6 +3667,9 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
offset = -offset;
}
int32_t address = get_register(rn) + 4 * offset;
// Load and store address for doubles must be at least four-byte
// aligned.
DCHECK((address % 4) == 0);
if (instr->HasL()) {
// Load double from memory: vldr.
int32_t data[] = {
@ -4028,6 +3912,45 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNIMPLEMENTED();
}
break;
case 0x1C:
if ((instr->Bits(11, 9) == 0x5) && (instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
// VSEL* (floating-point)
bool condition_holds;
switch (instr->Bits(21, 20)) {
case 0x0: // VSELEQ
condition_holds = (z_flag_ == 1);
break;
case 0x1: // VSELVS
condition_holds = (v_flag_ == 1);
break;
case 0x2: // VSELGE
condition_holds = (n_flag_ == v_flag_);
break;
case 0x3: // VSELGT
condition_holds = ((z_flag_ == 0) && (n_flag_ == v_flag_));
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
break;
}
if (instr->SzValue() == 0x1) {
int n = instr->VFPNRegValue(kDoublePrecision);
int m = instr->VFPMRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
double result = get_double_from_d_register(condition_holds ? n : m);
set_d_register_from_double(d, result);
} else {
int n = instr->VFPNRegValue(kSinglePrecision);
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
float result = get_float_from_s_register(condition_holds ? n : m);
set_s_register_from_float(d, result);
}
} else {
UNIMPLEMENTED();
}
break;
default:
UNIMPLEMENTED();
break;
@ -4048,7 +3971,8 @@ void Simulator::InstructionDecode(Instruction* instr) {
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(instr));
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(instr),
buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
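
The VSEL handling added to DecodeSpecialCondition above picks the n register when the encoded condition holds and the m register otherwise. A small standalone sketch of that condition test over the NZCV flags, mirroring the switch on bits 21:20; the enum and struct here are made up for illustration:

// NZCV flag snapshot; field names are illustrative.
struct Flags {
  bool n, z, c, v;
};

// Condition field taken from bits 21:20 of the VSEL encoding.
enum class VselCond { kEq = 0, kVs = 1, kGe = 2, kGt = 3 };

bool VselConditionHolds(VselCond cond, const Flags& f) {
  switch (cond) {
    case VselCond::kEq: return f.z;                   // VSELEQ
    case VselCond::kVs: return f.v;                   // VSELVS
    case VselCond::kGe: return f.n == f.v;            // VSELGE
    case VselCond::kGt: return !f.z && (f.n == f.v);  // VSELGT
  }
  return false;
}

// vsel d, n, m selects n when the condition holds and m otherwise.
double VselDouble(VselCond cond, const Flags& f, double n, double m) {
  return VselConditionHolds(cond, f) ? n : m;
}
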

16
deps/v8/src/arm/simulator-arm.h

@ -68,7 +68,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/hashmap.h"
#include "src/base/hashmap.h"
namespace v8 {
namespace internal {
@ -200,7 +200,7 @@ class Simulator {
// Call on program start.
static void Initialize(Isolate* isolate);
static void TearDown(HashMap* i_cache, Redirection* first);
static void TearDown(base::HashMap* i_cache, Redirection* first);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@ -222,8 +222,7 @@ class Simulator {
char* last_debugger_input() { return last_debugger_input_; }
// ICache checking.
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
@ -342,10 +341,9 @@ class Simulator {
void InstructionDecode(Instruction* instr);
// ICache.
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
static void CheckICache(base::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
@ -405,7 +403,7 @@ class Simulator {
char* last_debugger_input_;
// Icache simulation
v8::internal::HashMap* i_cache_;
base::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;

37
deps/v8/src/arm64/assembler-arm64-inl.h

@ -16,6 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsSimd128() { return false; }
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
@ -26,34 +27,6 @@ void RelocInfo::apply(intptr_t delta) {
*p += delta; // Relocate entry.
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
Assembler::set_target_address_at(isolate_, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
}
}
void RelocInfo::update_wasm_memory_reference(
Address old_base, Address new_base, size_t old_size, size_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK(old_base <= wasm_memory_reference() &&
wasm_memory_reference() < old_base + old_size);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
DCHECK(new_base <= updated_reference &&
updated_reference < new_base + new_size);
Assembler::set_target_address_at(isolate_, pc_, host_, updated_reference,
icache_flush_mode);
}
inline int CPURegister::code() const {
DCHECK(IsValid());
return reg_code;
@ -705,11 +678,6 @@ Address RelocInfo::target_address() {
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
@ -749,6 +717,7 @@ void RelocInfo::set_target_object(Object* target,
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
}
}
@ -868,7 +837,7 @@ void RelocInfo::WipeOut() {
}
}
template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {

161
deps/v8/src/arm64/assembler-arm64.cc

@ -51,26 +51,13 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
// Probe for runtime features
base::CPU cpu;
if (cpu.implementer() == base::CPU::NVIDIA &&
cpu.variant() == base::CPU::NVIDIA_DENVER &&
cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
// TODO(jkummerow): This is turned off as an experiment to see if it
// affects crash rates. Keep an eye on crash reports and either remove
// coherent cache support permanently, or re-enable it!
// supported_ |= 1u << COHERENT_CACHE;
}
// We used to probe for coherent cache support, but on older CPUs it
// causes crashes (crbug.com/524337), and newer CPUs don't even have
// the feature any more.
}
void CpuFeatures::PrintTarget() { }
void CpuFeatures::PrintFeatures() {
printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
}
void CpuFeatures::PrintFeatures() {}
// -----------------------------------------------------------------------------
// CPURegList utilities.
@ -192,12 +179,35 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
Address RelocInfo::wasm_memory_reference() {
DCHECK(IsWasmMemoryReference(rmode_));
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
uint32_t RelocInfo::wasm_memory_size_reference() {
DCHECK(IsWasmMemorySizeReference(rmode_));
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
Address RelocInfo::wasm_global_reference() {
DCHECK(IsWasmGlobalReference(rmode_));
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
}
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
const RegisterConfiguration* config =
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
Register candidate = Register::from_code(code);
@ -269,7 +279,6 @@ void Immediate::InitializeHandle(Handle<Object> handle) {
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
value_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
@ -294,13 +303,11 @@ bool Operand::NeedsRelocation(const Assembler* assembler) const {
// Constant Pool.
void ConstPool::RecordEntry(intptr_t data,
RelocInfo::Mode mode) {
DCHECK(mode != RelocInfo::COMMENT &&
mode != RelocInfo::POSITION &&
mode != RelocInfo::STATEMENT_POSITION &&
mode != RelocInfo::CONST_POOL &&
DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::CODE_AGE_SEQUENCE &&
mode != RelocInfo::DEOPT_REASON);
mode != RelocInfo::DEOPT_POSITION && mode != RelocInfo::DEOPT_REASON &&
mode != RelocInfo::DEOPT_ID);
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
@ -544,8 +551,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
constpool_(this),
recorded_ast_id_(TypeFeedbackId::None()),
unresolved_branches_(),
positions_recorder_(this) {
unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
Reset();
@ -579,7 +585,6 @@ void Assembler::Reset() {
void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.Finish();
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
@ -594,6 +599,8 @@ void Assembler::GetCode(CodeDesc* desc) {
reloc_info_writer.pos());
desc->origin = this;
desc->constant_pool_size = 0;
desc->unwinding_info_size = 0;
desc->unwinding_info = nullptr;
}
}
@ -962,14 +969,12 @@ void Assembler::EndBlockVeneerPool() {
void Assembler::br(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
DCHECK(xn.Is64Bits());
Emit(BR | Rn(xn));
}
void Assembler::blr(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
DCHECK(xn.Is64Bits());
// The pattern 'blr xzr' is used as a guard to detect when execution falls
// through the constant pool. It should not be emitted.
@ -979,7 +984,6 @@ void Assembler::blr(const Register& xn) {
void Assembler::ret(const Register& xn) {
positions_recorder()->WriteRecordedPositions();
DCHECK(xn.Is64Bits());
Emit(RET | Rn(xn));
}
@ -991,7 +995,6 @@ void Assembler::b(int imm26) {
void Assembler::b(Label* label) {
positions_recorder()->WriteRecordedPositions();
b(LinkAndGetInstructionOffsetTo(label));
}
@ -1002,47 +1005,40 @@ void Assembler::b(int imm19, Condition cond) {
void Assembler::b(Label* label, Condition cond) {
positions_recorder()->WriteRecordedPositions();
b(LinkAndGetInstructionOffsetTo(label), cond);
}
void Assembler::bl(int imm26) {
positions_recorder()->WriteRecordedPositions();
Emit(BL | ImmUncondBranch(imm26));
}
void Assembler::bl(Label* label) {
positions_recorder()->WriteRecordedPositions();
bl(LinkAndGetInstructionOffsetTo(label));
}
void Assembler::cbz(const Register& rt,
int imm19) {
positions_recorder()->WriteRecordedPositions();
Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}
void Assembler::cbz(const Register& rt,
Label* label) {
positions_recorder()->WriteRecordedPositions();
cbz(rt, LinkAndGetInstructionOffsetTo(label));
}
void Assembler::cbnz(const Register& rt,
int imm19) {
positions_recorder()->WriteRecordedPositions();
Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}
void Assembler::cbnz(const Register& rt,
Label* label) {
positions_recorder()->WriteRecordedPositions();
cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}
@ -1050,7 +1046,6 @@ void Assembler::cbnz(const Register& rt,
void Assembler::tbz(const Register& rt,
unsigned bit_pos,
int imm14) {
positions_recorder()->WriteRecordedPositions();
DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@ -1059,7 +1054,6 @@ void Assembler::tbz(const Register& rt,
void Assembler::tbz(const Register& rt,
unsigned bit_pos,
Label* label) {
positions_recorder()->WriteRecordedPositions();
tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}
@ -1067,7 +1061,6 @@ void Assembler::tbz(const Register& rt,
void Assembler::tbnz(const Register& rt,
unsigned bit_pos,
int imm14) {
positions_recorder()->WriteRecordedPositions();
DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
@ -1076,7 +1069,6 @@ void Assembler::tbnz(const Register& rt,
void Assembler::tbnz(const Register& rt,
unsigned bit_pos,
Label* label) {
positions_recorder()->WriteRecordedPositions();
tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}
@ -1696,6 +1688,83 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
ldr_pcrel(rt, 0);
}
void Assembler::ldar(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::ldaxr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::ldarb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::ldaxrb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlrb(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlxrb(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::ldarh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::ldaxrh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlrh(const Register& rt, const Register& rn) {
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlxrh(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
}
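
The ldar/stlr and ldaxr/stlxr emitters above implement ARMv8 load-acquire, store-release and exclusive accesses. As a rough illustration of the semantics these instructions provide at the source level (this is not V8 code, just a hedged sketch), the C++ snippet below uses std::atomic, which compilers typically lower to exactly these instructions on ARM64: LDAR for acquire loads, STLR for release stores, and an LDAXR/STLXR retry loop for compare-exchange when LSE atomics are unavailable.

#include <atomic>
#include <cstdio>

// Minimal sketch: a flag guarding a payload.
std::atomic<int> ready{0};
int payload = 0;

void producer() {
  payload = 42;                                   // plain store
  ready.store(1, std::memory_order_release);      // typically lowers to STLR
}

void consumer() {
  while (ready.load(std::memory_order_acquire) == 0) {  // typically LDAR
  }
  std::printf("%d\n", payload);                   // guaranteed to observe 42
}

bool claim() {
  int expected = 0;
  // Typically an LDAXR/STLXR loop without LSE atomics.
  return ready.compare_exchange_strong(expected, 1, std::memory_order_acq_rel);
}

int main() {
  producer();
  consumer();
  return claim() ? 1 : 0;
}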
void Assembler::mov(const Register& rd, const Register& rm) {
// Moves involving the stack pointer are encoded as add immediate with
@ -2878,11 +2947,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON) ||
(rmode == RelocInfo::DEOPT_POSITION) ||
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
(rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsGeneratorContinuation(rmode));

65
deps/v8/src/arm64/assembler-arm64.h

@ -40,12 +40,22 @@ namespace internal {
R(x8) R(x9) R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
V(s8) V(s9) V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
#define DOUBLE_REGISTERS(R) \
R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d15) \
R(d16) R(d17) R(d18) R(d19) R(d20) R(d21) R(d22) R(d23) \
R(d24) R(d25) R(d26) R(d27) R(d28) R(d29) R(d30) R(d31)
#define SIMD128_REGISTERS(V) \
V(q0) V(q1) V(q2) V(q3) V(q4) V(q5) V(q6) V(q7) \
V(q8) V(q9) V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
#define ALLOCATABLE_DOUBLE_REGISTERS(R) \
R(d0) R(d1) R(d2) R(d3) R(d4) R(d5) R(d6) R(d7) \
R(d8) R(d9) R(d10) R(d11) R(d12) R(d13) R(d14) R(d16) \
@ -148,8 +158,6 @@ struct Register : public CPURegister {
DCHECK(IsValidOrNone());
}
const char* ToString();
bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsRegister() || IsNone());
return IsValidRegister();
@ -189,6 +197,7 @@ struct Register : public CPURegister {
// End of V8 compatibility section -----------------------
};
static const bool kSimpleFPAliasing = true;
struct FPRegister : public CPURegister {
enum Code {
@ -224,8 +233,6 @@ struct FPRegister : public CPURegister {
DCHECK(IsValidOrNone());
}
const char* ToString();
bool IsAllocatable() const;
bool IsValid() const {
DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
@ -366,7 +373,7 @@ bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
typedef FPRegister FloatRegister;
typedef FPRegister DoubleRegister;
// TODO(arm64) Define SIMD registers.
@ -922,14 +929,11 @@ class Assembler : public AssemblerBase {
}
// Debugging ----------------------------------------------------------------
AssemblerPositionsRecorder* positions_recorder() {
return &positions_recorder_;
}
void RecordComment(const char* msg);
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
void RecordDeoptReason(const int reason, int raw_position);
void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
int buffer_space() const;
@ -1395,6 +1399,42 @@ class Assembler : public AssemblerBase {
// Load literal to register.
void ldr(const CPURegister& rt, const Immediate& imm);
// Load-acquire word.
void ldar(const Register& rt, const Register& rn);
// Load-acquire exclusive word.
void ldaxr(const Register& rt, const Register& rn);
// Store-release word.
void stlr(const Register& rt, const Register& rn);
// Store-release exclusive word.
void stlxr(const Register& rs, const Register& rt, const Register& rn);
// Load-acquire byte.
void ldarb(const Register& rt, const Register& rn);
// Load-acquire exclusive byte.
void ldaxrb(const Register& rt, const Register& rn);
// Store-release byte.
void stlrb(const Register& rt, const Register& rn);
// Store-release exclusive byte.
void stlxrb(const Register& rs, const Register& rt, const Register& rn);
// Load-acquire half-word.
void ldarh(const Register& rt, const Register& rn);
// Load-acquire exclusive half-word.
void ldaxrh(const Register& rt, const Register& rn);
// Store-release half-word.
void stlrh(const Register& rt, const Register& rn);
// Store-release exclusive half-word.
void stlxrh(const Register& rs, const Register& rt, const Register& rn);
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift
@ -1689,6 +1729,11 @@ class Assembler : public AssemblerBase {
return rt2.code() << Rt2_offset;
}
static Instr Rs(CPURegister rs) {
DCHECK(rs.code() != kSPRegInternalCode);
return rs.code() << Rs_offset;
}
// These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register.
static Instr RdSP(Register rd) {
@ -2137,8 +2182,6 @@ class Assembler : public AssemblerBase {
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
private:
AssemblerPositionsRecorder positions_recorder_;
friend class AssemblerPositionsRecorder;
friend class EnsureSpace;
friend class ConstPool;
};

812
deps/v8/src/arm64/code-stubs-arm64.cc

File diff suppressed because it is too large

206
deps/v8/src/arm64/codegen-arm64.cc

@ -15,66 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
byte* fast_exp_arm64_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
Simulator * simulator = Simulator::current(isolate);
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
};
return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
// Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
if (buffer == nullptr) return nullptr;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
DoubleRegister input = d0;
// Use other caller-saved registers for all other values.
DoubleRegister result = d1;
DoubleRegister double_temp1 = d2;
DoubleRegister double_temp2 = d3;
Register temp1 = x10;
Register temp2 = x11;
Register temp3 = x12;
MathExpGenerator::EmitMathExp(&masm, input, result,
double_temp1, double_temp2,
temp1, temp2, temp3);
// Move the result to the return register.
masm.Fmov(d0, result);
masm.Ret();
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
fast_exp_arm64_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
@ -175,8 +115,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
@ -184,18 +124,18 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
scratch, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Move(x10, array);
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
@ -282,8 +222,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
@ -293,7 +233,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
// Allocating heap numbers in the loop below can fail and cause a jump to
@ -307,8 +247,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Cmp(dst_elements, dst_end);
__ B(lt, &initialization_loop);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(array, array, kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
Register heap_num_map = x15;
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
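
The hunks above switch raw MemOperand stores to FieldMemOperand and subtract kHeapObjectTag when computing element addresses: V8 heap object pointers are tagged, so the untagged address of a field is the tagged pointer plus the field offset minus the tag. A minimal sketch of that arithmetic follows; the tag value of 1 is an assumption used only for illustration, not taken from this diff.

#include <cassert>
#include <cstdint>

// Hypothetical constant for illustration.
constexpr intptr_t kHeapObjectTag = 1;

// FieldMemOperand(obj, offset) behaves like MemOperand(obj, offset - tag):
// the tag is folded into the displacement instead of untagging the pointer.
intptr_t FieldAddress(intptr_t tagged_object, int byte_offset) {
  return tagged_object + byte_offset - kHeapObjectTag;
}

int main() {
  intptr_t untagged = 0x1000;                  // where the object really lives
  intptr_t tagged = untagged + kHeapObjectTag; // what code carries around
  assert(FieldAddress(tagged, 8) == untagged + 8);
  return 0;
}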
@ -511,127 +450,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Bind(&done);
}
static MemOperand ExpConstant(Register base, int index) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_temp1,
DoubleRegister double_temp2,
Register temp1,
Register temp2,
Register temp3) {
// TODO(jbramley): There are several instances where fnmsub could be used
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?
DCHECK(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
DCHECK(!masm->serializer_enabled()); // External references not serializable.
Label done;
DoubleRegister double_temp3 = result;
Register constants = temp3;
// The algorithm used relies on some magic constants which are initialized in
// ExternalReference::InitializeMathExpData().
// Load the address of the start of the array.
__ Mov(constants, ExternalReference::math_exp_constants(0));
// We have to do a four-way split here:
// - If input <= about -708.4, the output always rounds to zero.
// - If input >= about 709.8, the output always rounds to +infinity.
// - If the input is NaN, the output is NaN.
// - Otherwise, the result needs to be calculated.
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
__ Fcmp(input, double_temp1);
__ Fccmp(input, double_temp2, NoFlag, hi);
// At this point, the condition flags can be in one of five states:
// NZCV
// 1000 -708.4 < input < 709.8 result = exp(input)
// 0110 input == 709.8 result = +infinity
// 0010 input > 709.8 result = +infinity
// 0011 input is NaN result = input
// 0000 input <= -708.4 result = +0.0
// Continue the common case first. 'mi' tests N == 1.
__ B(&result_is_finite_non_zero, mi);
// TODO(jbramley): Consider adding a +infinity register for ARM64.
__ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
// Select between +0.0 and +infinity. 'lo' tests C == 0.
__ Fcsel(result, fp_zero, double_temp2, lo);
// Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
__ Fcsel(result, result, input, vc);
__ B(&done);
// The rest is magic, as described in InitializeMathExpData().
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
__ Fmov(temp2.W(), double_temp1.S());
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp1, double_temp1, double_temp2);
__ Fsub(double_temp1, double_temp1, input);
__ Fmul(double_temp2, double_temp1, double_temp1);
__ Fsub(double_temp3, double_temp3, double_temp1);
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
__ Ldr(double_temp2, ExpConstant(constants, 7));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Fsub(double_temp3, double_temp3, double_temp1);
// The 8th constant is 1.0, so use an immediate move rather than a load.
// We can't generate a runtime assertion here as we would need to call Abort
// in the runtime and we don't have an Isolate when we generate this code.
__ Fmov(double_temp2, 1.0);
__ Fadd(double_temp3, double_temp3, double_temp2);
__ And(temp2, temp2, 0x7ff);
__ Add(temp1, temp1, 0x3ff);
// Do the final table lookup.
__ Mov(temp3, ExternalReference::math_exp_log_table());
__ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
__ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
__ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
__ Bfi(temp2, temp1, 32, 32);
__ Fmov(double_temp1, temp2);
__ Fmul(result, double_temp3, double_temp1);
__ Bind(&done);
}
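
The removed MathExpGenerator code above implements the four-way split spelled out in its comments: inputs below roughly -708.4 flush to +0.0, inputs above roughly 709.8 saturate to +infinity, NaN passes through unchanged, and only the remaining range goes through the table-driven core. A hedged C++ sketch of that control flow, using std::exp as a stand-in for the removed approximation:

#include <cmath>
#include <cstdio>
#include <limits>

// Sketch of the range split only; thresholds are the approximate values
// quoted in the removed comments, and std::exp replaces the table-driven core.
double fast_exp_split(double x) {
  if (std::isnan(x)) return x;                                      // NaN in, NaN out
  if (x >= 709.8) return std::numeric_limits<double>::infinity();   // overflow
  if (x <= -708.4) return +0.0;                                     // underflow
  return std::exp(x);                                               // common case
}

int main() {
  std::printf("%g %g %g\n", fast_exp_split(1.0), fast_exp_split(1000.0),
              fast_exp_split(-1000.0));
  return 0;
}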
#undef __
} // namespace internal

17
deps/v8/src/arm64/codegen-arm64.h

@ -5,7 +5,6 @@
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
#include "src/ast/ast.h"
#include "src/macro-assembler.h"
namespace v8 {
@ -27,22 +26,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} // namespace internal
} // namespace v8

189
deps/v8/src/arm64/constants-arm64.h

@ -117,89 +117,89 @@ const unsigned kDoubleExponentBias = 1023;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
V_(Rd, 4, 0, Bits) /* Destination register. */ \
V_(Rn, 9, 5, Bits) /* First source register. */ \
V_(Rm, 20, 16, Bits) /* Second source register. */ \
V_(Ra, 14, 10, Bits) /* Third source register. */ \
V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
/* store second source. */ \
V_(PrefetchMode, 4, 0, Bits) \
\
/* Common bits */ \
V_(SixtyFourBits, 31, 31, Bits) \
V_(FlagsUpdate, 29, 29, Bits) \
\
/* PC relative addressing */ \
V_(ImmPCRelHi, 23, 5, SignedBits) \
V_(ImmPCRelLo, 30, 29, Bits) \
\
/* Add/subtract/logical shift register */ \
V_(ShiftDP, 23, 22, Bits) \
V_(ImmDPShift, 15, 10, Bits) \
\
/* Add/subtract immediate */ \
V_(ImmAddSub, 21, 10, Bits) \
V_(ShiftAddSub, 23, 22, Bits) \
\
/* Add/subtract extend */ \
V_(ImmExtendShift, 12, 10, Bits) \
V_(ExtendMode, 15, 13, Bits) \
\
/* Move wide */ \
V_(ImmMoveWide, 20, 5, Bits) \
V_(ShiftMoveWide, 22, 21, Bits) \
\
/* Logical immediate, bitfield and extract */ \
V_(BitN, 22, 22, Bits) \
V_(ImmRotate, 21, 16, Bits) \
V_(ImmSetBits, 15, 10, Bits) \
V_(ImmR, 21, 16, Bits) \
V_(ImmS, 15, 10, Bits) \
\
/* Test and branch immediate */ \
V_(ImmTestBranch, 18, 5, SignedBits) \
V_(ImmTestBranchBit40, 23, 19, Bits) \
V_(ImmTestBranchBit5, 31, 31, Bits) \
\
/* Conditionals */ \
V_(Condition, 15, 12, Bits) \
V_(ConditionBranch, 3, 0, Bits) \
V_(Nzcv, 3, 0, Bits) \
V_(ImmCondCmp, 20, 16, Bits) \
V_(ImmCondBranch, 23, 5, SignedBits) \
\
/* Floating point */ \
V_(FPType, 23, 22, Bits) \
V_(ImmFP, 20, 13, Bits) \
V_(FPScale, 15, 10, Bits) \
\
/* Load Store */ \
V_(ImmLS, 20, 12, SignedBits) \
V_(ImmLSUnsigned, 21, 10, Bits) \
V_(ImmLSPair, 21, 15, SignedBits) \
V_(SizeLS, 31, 30, Bits) \
V_(ImmShiftLS, 12, 12, Bits) \
\
/* Other immediates */ \
V_(ImmUncondBranch, 25, 0, SignedBits) \
V_(ImmCmpBranch, 23, 5, SignedBits) \
V_(ImmLLiteral, 23, 5, SignedBits) \
V_(ImmException, 20, 5, Bits) \
V_(ImmHint, 11, 5, Bits) \
V_(ImmBarrierDomain, 11, 10, Bits) \
V_(ImmBarrierType, 9, 8, Bits) \
\
/* System (MRS, MSR) */ \
V_(ImmSystemRegister, 19, 5, Bits) \
V_(SysO0, 19, 19, Bits) \
V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \
V_(CRm, 11, 8, Bits) \
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
V_(Rd, 4, 0, Bits) /* Destination register. */ \
V_(Rn, 9, 5, Bits) /* First source register. */ \
V_(Rm, 20, 16, Bits) /* Second source register. */ \
V_(Ra, 14, 10, Bits) /* Third source register. */ \
V_(Rt, 4, 0, Bits) /* Load dest / store source. */ \
V_(Rt2, 14, 10, Bits) /* Load second dest / */ \
/* store second source. */ \
V_(Rs, 20, 16, Bits) /* Store-exclusive status */ \
V_(PrefetchMode, 4, 0, Bits) \
\
/* Common bits */ \
V_(SixtyFourBits, 31, 31, Bits) \
V_(FlagsUpdate, 29, 29, Bits) \
\
/* PC relative addressing */ \
V_(ImmPCRelHi, 23, 5, SignedBits) \
V_(ImmPCRelLo, 30, 29, Bits) \
\
/* Add/subtract/logical shift register */ \
V_(ShiftDP, 23, 22, Bits) \
V_(ImmDPShift, 15, 10, Bits) \
\
/* Add/subtract immediate */ \
V_(ImmAddSub, 21, 10, Bits) \
V_(ShiftAddSub, 23, 22, Bits) \
\
/* Add/subtract extend */ \
V_(ImmExtendShift, 12, 10, Bits) \
V_(ExtendMode, 15, 13, Bits) \
\
/* Move wide */ \
V_(ImmMoveWide, 20, 5, Bits) \
V_(ShiftMoveWide, 22, 21, Bits) \
\
/* Logical immediate, bitfield and extract */ \
V_(BitN, 22, 22, Bits) \
V_(ImmRotate, 21, 16, Bits) \
V_(ImmSetBits, 15, 10, Bits) \
V_(ImmR, 21, 16, Bits) \
V_(ImmS, 15, 10, Bits) \
\
/* Test and branch immediate */ \
V_(ImmTestBranch, 18, 5, SignedBits) \
V_(ImmTestBranchBit40, 23, 19, Bits) \
V_(ImmTestBranchBit5, 31, 31, Bits) \
\
/* Conditionals */ \
V_(Condition, 15, 12, Bits) \
V_(ConditionBranch, 3, 0, Bits) \
V_(Nzcv, 3, 0, Bits) \
V_(ImmCondCmp, 20, 16, Bits) \
V_(ImmCondBranch, 23, 5, SignedBits) \
\
/* Floating point */ \
V_(FPType, 23, 22, Bits) \
V_(ImmFP, 20, 13, Bits) \
V_(FPScale, 15, 10, Bits) \
\
/* Load Store */ \
V_(ImmLS, 20, 12, SignedBits) \
V_(ImmLSUnsigned, 21, 10, Bits) \
V_(ImmLSPair, 21, 15, SignedBits) \
V_(SizeLS, 31, 30, Bits) \
V_(ImmShiftLS, 12, 12, Bits) \
\
/* Other immediates */ \
V_(ImmUncondBranch, 25, 0, SignedBits) \
V_(ImmCmpBranch, 23, 5, SignedBits) \
V_(ImmLLiteral, 23, 5, SignedBits) \
V_(ImmException, 20, 5, Bits) \
V_(ImmHint, 11, 5, Bits) \
V_(ImmBarrierDomain, 11, 10, Bits) \
V_(ImmBarrierType, 9, 8, Bits) \
\
/* System (MRS, MSR) */ \
V_(ImmSystemRegister, 19, 5, Bits) \
V_(SysO0, 19, 19, Bits) \
V_(SysOp1, 18, 16, Bits) \
V_(SysOp2, 7, 5, Bits) \
V_(CRn, 15, 12, Bits) \
V_(CRm, 11, 8, Bits)
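
Each INSTRUCTION_FIELDS_LIST entry names a field together with its most- and least-significant bit positions; the new Rs entry (bits 20..16) backs the Assembler::Rs() encoder added for the exclusive stores. A small self-contained sketch of how such an (msb, lsb) pair turns into shift/mask helpers; the helper names here are illustrative, not the ones V8 generates.

#include <cassert>
#include <cstdint>

// Derived from an (msb, lsb) field description, e.g. V_(Rs, 20, 16, Bits).
constexpr int Rs_msb = 20;
constexpr int Rs_lsb = 16;
constexpr int Rs_offset = Rs_lsb;
constexpr int Rs_width = Rs_msb - Rs_lsb + 1;                      // 5 bits
constexpr uint32_t Rs_mask = ((1u << Rs_width) - 1) << Rs_offset;

constexpr uint32_t EncodeRs(uint32_t reg_code) {
  return (reg_code << Rs_offset) & Rs_mask;
}
constexpr uint32_t ExtractRs(uint32_t instr) {
  return (instr & Rs_mask) >> Rs_offset;
}

int main() {
  uint32_t instr = EncodeRs(7);   // place register code 7 in bits 20..16
  assert(ExtractRs(instr) == 7);
  return 0;
}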
#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
/* NZCV */ \
@ -857,6 +857,29 @@ enum LoadStoreRegisterOffset {
#undef LOAD_STORE_REGISTER_OFFSET
};
// Load/store acquire/release
enum LoadStoreAcquireReleaseOp {
LoadStoreAcquireReleaseFixed = 0x08000000,
LoadStoreAcquireReleaseFMask = 0x3F000000,
LoadStoreAcquireReleaseMask = 0xCFC08000,
STLXR_b = LoadStoreAcquireReleaseFixed | 0x00008000,
LDAXR_b = LoadStoreAcquireReleaseFixed | 0x00408000,
STLR_b = LoadStoreAcquireReleaseFixed | 0x00808000,
LDAR_b = LoadStoreAcquireReleaseFixed | 0x00C08000,
STLXR_h = LoadStoreAcquireReleaseFixed | 0x40008000,
LDAXR_h = LoadStoreAcquireReleaseFixed | 0x40408000,
STLR_h = LoadStoreAcquireReleaseFixed | 0x40808000,
LDAR_h = LoadStoreAcquireReleaseFixed | 0x40C08000,
STLXR_w = LoadStoreAcquireReleaseFixed | 0x80008000,
LDAXR_w = LoadStoreAcquireReleaseFixed | 0x80408000,
STLR_w = LoadStoreAcquireReleaseFixed | 0x80808000,
LDAR_w = LoadStoreAcquireReleaseFixed | 0x80C08000,
STLXR_x = LoadStoreAcquireReleaseFixed | 0xC0008000,
LDAXR_x = LoadStoreAcquireReleaseFixed | 0xC0408000,
STLR_x = LoadStoreAcquireReleaseFixed | 0xC0808000,
LDAR_x = LoadStoreAcquireReleaseFixed | 0xC0C08000,
};
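
The LoadStoreAcquireReleaseOp enum above follows the usual pattern for these opcode tables: a Fixed bit pattern identifies the instruction class under FMask, and each instruction's full opcode is unique under the finer Mask. A short sketch that checks those relationships for two of the new opcodes; the values are copied from the enum above, and the static_asserts simply confirm the mask arithmetic.

#include <cstdint>

constexpr uint32_t kFixed = 0x08000000;   // LoadStoreAcquireReleaseFixed
constexpr uint32_t kFMask = 0x3F000000;   // LoadStoreAcquireReleaseFMask
constexpr uint32_t kMask  = 0xCFC08000;   // LoadStoreAcquireReleaseMask
constexpr uint32_t LDAR_w  = kFixed | 0x80C08000;
constexpr uint32_t STLXR_x = kFixed | 0xC0008000;

static_assert((LDAR_w & kFMask) == kFixed, "class bits identify the group");
static_assert((STLXR_x & kFMask) == kFixed, "class bits identify the group");
static_assert((LDAR_w & kMask) == LDAR_w, "opcode bits lie inside the mask");
static_assert((STLXR_x & kMask) == STLXR_x, "opcode bits lie inside the mask");

int main() { return 0; }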
// Conditional compare.
enum ConditionalCompareOp {
ConditionalCompareMask = 0x60000000,

18
deps/v8/src/arm64/cpu-arm64.cc

@ -58,14 +58,16 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
__asm__ __volatile__ ( // NOLINT
// Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
// va : by (Virtual) Address
// u : to the point of Unification
// The point of unification for a processor is the point by which the
// instruction and data caches are guaranteed to see the same copy of a
// memory location. See ARM DDI 0406B page B2-12 for more information.
"dc cvau, %[dline] \n\t"
// dc : Data Cache maintenance
// c : Clean
// i : Invalidate
// va : by (Virtual) Address
// c : to the point of Coherency
// See ARM DDI 0406B page B2-12 for more information.
// We would prefer to use "cvau" (clean to the point of unification) here
// but we use "civac" to work around Cortex-A53 errata 819472, 826319,
// 827319 and 824069.
"dc civac, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"

Some files were not shown because too many files changed in this diff
