
deps: upgrade v8 to 3.26.33

Signed-off-by: Fedor Indutny <fedor@indutny.com>
Branch: archived-io.js-v0.10
Author: Ben Noordhuis (11 years ago)
Committed by: Fedor Indutny
Commit: 3a280b2034
Changed files (changed-line counts in parentheses):

  1. deps/v8/.DEPS.git (48)
  2. deps/v8/.gitignore (4)
  3. deps/v8/BUILD.gn (873)
  4. deps/v8/ChangeLog (305)
  5. deps/v8/DEPS (2)
  6. deps/v8/Makefile (30)
  7. deps/v8/Makefile.android (8)
  8. deps/v8/Makefile.nacl (3)
  9. deps/v8/OWNERS (1)
  10. deps/v8/PRESUBMIT.py (7)
  11. deps/v8/build/android.gypi (2)
  12. deps/v8/build/features.gypi (8)
  13. deps/v8/build/gyp_v8 (3)
  14. deps/v8/build/standalone.gypi (11)
  15. deps/v8/build/toolchain.gypi (338)
  16. deps/v8/include/v8-debug.h (62)
  17. deps/v8/include/v8-platform.h (27)
  18. deps/v8/include/v8-profiler.h (66)
  19. deps/v8/include/v8-testing.h (27)
  20. deps/v8/include/v8-util.h (345)
  21. deps/v8/include/v8.h (239)
  22. deps/v8/include/v8config.h (27)
  23. deps/v8/include/v8stdint.h (27)
  24. deps/v8/samples/lineprocessor.cc (14)
  25. deps/v8/src/accessors.cc (1227)
  26. deps/v8/src/accessors.h (147)
  27. deps/v8/src/allocation-site-scopes.cc (29)
  28. deps/v8/src/allocation-site-scopes.h (27)
  29. deps/v8/src/allocation-tracker.cc (38)
  30. deps/v8/src/allocation-tracker.h (27)
  31. deps/v8/src/allocation.cc (27)
  32. deps/v8/src/allocation.h (27)
  33. deps/v8/src/api.cc (1066)
  34. deps/v8/src/api.h (35)
  35. deps/v8/src/apinatives.js (61)
  36. deps/v8/src/arguments.cc (27)
  37. deps/v8/src/arguments.h (38)
  38. deps/v8/src/arm/assembler-arm-inl.h (14)
  39. deps/v8/src/arm/assembler-arm.cc (80)
  40. deps/v8/src/arm/assembler-arm.h (27)
  41. deps/v8/src/arm/builtins-arm.cc (65)
  42. deps/v8/src/arm/code-stubs-arm.cc (929)
  43. deps/v8/src/arm/code-stubs-arm.h (52)
  44. deps/v8/src/arm/codegen-arm.cc (101)
  45. deps/v8/src/arm/codegen-arm.h (27)
  46. deps/v8/src/arm/constants-arm.cc (27)
  47. deps/v8/src/arm/constants-arm.h (27)
  48. deps/v8/src/arm/cpu-arm.cc (37)
  49. deps/v8/src/arm/debug-arm.cc (68)
  50. deps/v8/src/arm/deoptimizer-arm.cc (32)
  51. deps/v8/src/arm/disasm-arm.cc (29)
  52. deps/v8/src/arm/frames-arm.cc (27)
  53. deps/v8/src/arm/frames-arm.h (27)
  54. deps/v8/src/arm/full-codegen-arm.cc (279)
  55. deps/v8/src/arm/ic-arm.cc (27)
  56. deps/v8/src/arm/lithium-arm.cc (249)
  57. deps/v8/src/arm/lithium-arm.h (69)
  58. deps/v8/src/arm/lithium-codegen-arm.cc (412)
  59. deps/v8/src/arm/lithium-codegen-arm.h (38)
  60. deps/v8/src/arm/lithium-gap-resolver-arm.cc (98)
  61. deps/v8/src/arm/lithium-gap-resolver-arm.h (31)
  62. deps/v8/src/arm/macro-assembler-arm.cc (153)
  63. deps/v8/src/arm/macro-assembler-arm.h (46)
  64. deps/v8/src/arm/regexp-macro-assembler-arm.cc (31)
  65. deps/v8/src/arm/regexp-macro-assembler-arm.h (27)
  66. deps/v8/src/arm/simulator-arm.cc (33)
  67. deps/v8/src/arm/simulator-arm.h (27)
  68. deps/v8/src/arm/stub-cache-arm.cc (92)
  69. deps/v8/src/arm64/assembler-arm64-inl.h (49)
  70. deps/v8/src/arm64/assembler-arm64.cc (178)
  71. deps/v8/src/arm64/assembler-arm64.h (91)
  72. deps/v8/src/arm64/builtins-arm64.cc (67)
  73. deps/v8/src/arm64/code-stubs-arm64.cc (1045)
  74. deps/v8/src/arm64/code-stubs-arm64.h (78)
  75. deps/v8/src/arm64/codegen-arm64.cc (69)
  76. deps/v8/src/arm64/codegen-arm64.h (27)
  77. deps/v8/src/arm64/constants-arm64.h (34)
  78. deps/v8/src/arm64/cpu-arm64.cc (113)
  79. deps/v8/src/arm64/cpu-arm64.h (52)
  80. deps/v8/src/arm64/debug-arm64.cc (67)
  81. deps/v8/src/arm64/decoder-arm64-inl.h (27)
  82. deps/v8/src/arm64/decoder-arm64.cc (27)
  83. deps/v8/src/arm64/decoder-arm64.h (27)
  84. deps/v8/src/arm64/deoptimizer-arm64.cc (86)
  85. deps/v8/src/arm64/disasm-arm64.cc (30)
  86. deps/v8/src/arm64/disasm-arm64.h (27)
  87. deps/v8/src/arm64/frames-arm64.cc (27)
  88. deps/v8/src/arm64/frames-arm64.h (27)
  89. deps/v8/src/arm64/full-codegen-arm64.cc (307)
  90. deps/v8/src/arm64/ic-arm64.cc (28)
  91. deps/v8/src/arm64/instructions-arm64.cc (42)
  92. deps/v8/src/arm64/instructions-arm64.h (46)
  93. deps/v8/src/arm64/instrument-arm64.cc (27)
  94. deps/v8/src/arm64/instrument-arm64.h (27)
  95. deps/v8/src/arm64/lithium-arm64.cc (436)
  96. deps/v8/src/arm64/lithium-arm64.h (175)
  97. deps/v8/src/arm64/lithium-codegen-arm64.cc (771)
  98. deps/v8/src/arm64/lithium-codegen-arm64.h (100)
  99. deps/v8/src/arm64/lithium-gap-resolver-arm64.cc (27)
  100. deps/v8/src/arm64/lithium-gap-resolver-arm64.h (27)

deps/v8/.DEPS.git (48)

@@ -0,0 +1,48 @@
# DO NOT EDIT EXCEPT FOR LOCAL TESTING.
# THIS IS A GENERATED FILE.
# ALL MANUAL CHANGES WILL BE OVERWRITTEN.
# SEE http://code.google.com/p/chromium/wiki/UsingGit
# FOR HOW TO ROLL DEPS
vars = {
    'webkit_url':
        'https://chromium.googlesource.com/chromium/blink.git',
    'git_url':
        'https://chromium.googlesource.com'
}

deps = {
    'v8/build/gyp':
        Var('git_url') + '/external/gyp.git@a3e2a5caf24a1e0a45401e09ad131210bf16b852',
    'v8/third_party/icu':
        Var('git_url') + '/chromium/deps/icu46.git@7a1ec88f69e25b3efcf76196d07f7815255db025',
}

deps_os = {
    'win':
    {
        'v8/third_party/cygwin':
            Var('git_url') + '/chromium/deps/cygwin.git@06a117a90c15174436bfa20ceebbfdf43b7eb820',
        'v8/third_party/python_26':
            Var('git_url') + '/chromium/deps/python_26.git@67d19f904470effe3122d27101cc5a8195abd157',
    },
}

include_rules = [
]

skip_child_includes = [
]

hooks = [
    {
        'action':
            [
                'python',
                'v8/build/gyp_v8'
            ],
        'pattern':
            '.'
    }
]

deps/v8/.gitignore (4)

@@ -46,6 +46,9 @@ shell_g
 /test/mozilla/CHECKED_OUT_VERSION
 /test/mozilla/data
 /test/mozilla/downloaded_*
+/test/promises-aplus/promises-tests
+/test/promises-aplus/promises-tests.tar.gz
+/test/promises-aplus/sinon
 /test/test262/data
 /test/test262/tc39-test262-*
 /third_party
@@ -63,3 +66,4 @@ GTAGS
 GRTAGS
 GSYMS
 GPATH
+gtags.files

deps/v8/BUILD.gn (873)

@@ -0,0 +1,873 @@
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(jochen): These will need to be user-settable to support standalone V8
# builds.
v8_compress_startup_data = "off"
v8_deprecation_warnings = false
v8_enable_disassembler = false
v8_enable_gdbjit = false
v8_enable_handle_zapping = true
v8_enable_i18n_support = true
v8_enable_verify_heap = false
v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_default_platform = true
v8_use_snapshot = true
if (is_debug) {
v8_enable_extra_checks = true
} else {
v8_enable_extra_checks = false
}
# TODO(jochen): Add support for want_seperate_host_toolset.
# TODO(jochen): Add toolchain.gypi support.
###############################################################################
# Configurations
#
config("internal_config") {
visibility = ":*" # Only targets in this file can depend on this.
include_dirs = [ "src" ]
if (component_mode == "shared_library") {
defines = [
"BUILDING_V8_SHARED",
"V8_SHARED",
]
}
}
config("features") {
visibility = ":*" # Only targets in this file can depend on this.
defines = []
if (v8_enable_disassembler == true) {
defines += [
"ENABLE_DISASSEMBLER",
]
}
if (v8_enable_gdbjit == true) {
defines += [
"ENABLE_GDB_JIT_INTERFACE",
]
}
if (v8_object_print == true) {
defines += [
"OBJECT_PRINT",
]
}
if (v8_enable_verify_heap == true) {
defines += [
"VERIFY_HEAP",
]
}
if (v8_interpreted_regexp == true) {
defines += [
"V8_INTERPRETED_REGEXP",
]
}
if (v8_deprecation_warnings == true) {
defines += [
"V8_DEPRECATION_WARNINGS",
]
}
if (v8_enable_i18n_support == true) {
defines += [
"V8_I18N_SUPPORT",
]
}
if (v8_use_default_platform == true) {
defines += [
"V8_USE_DEFAULT_PLATFORM",
]
}
if (v8_compress_startup_data == "bz2") {
defines += [
"COMPRESS_STARTUP_DATA_BZ2",
]
}
if (v8_enable_extra_checks == true) {
defines += [
"ENABLE_EXTRA_CHECKS",
]
}
if (v8_enable_handle_zapping == true) {
defines += [
"ENABLE_HANDLE_ZAPPING",
]
}
}
###############################################################################
# Actions
#
# TODO(jochen): Do actions need visibility settings as well?
action("generate_trig_table") {
visibility = ":*" # Only targets in this file can depend on this.
script = "tools/generate-trig-table.py"
outputs = [
"$target_gen_dir/trig-table.cc"
]
args = rebase_path(outputs, root_build_dir)
}
action("js2c") {
visibility = ":*" # Only targets in this file can depend on this.
script = "tools/js2c.py"
# The script depends on this other script, this rule causes a rebuild if it
# changes.
source_prereqs = [ "tools/jsmin.py" ]
sources = [
"src/runtime.js",
"src/v8natives.js",
"src/array.js",
"src/string.js",
"src/uri.js",
"src/math.js",
"src/messages.js",
"src/apinatives.js",
"src/debug-debugger.js",
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/date.js",
"src/json.js",
"src/regexp.js",
"src/arraybuffer.js",
"src/typedarray.js",
"src/object-observe.js",
"src/macros.py",
]
outputs = [
"$target_gen_dir/libraries.cc"
]
if (v8_enable_i18n_support) {
sources += [ "src/i18n.js" ]
}
args =
rebase_path(outputs, root_build_dir) +
[ "EXPERIMENTAL", v8_compress_startup_data ] +
rebase_path(sources, root_build_dir)
}
action("js2c_experimental") {
visibility = ":*" # Only targets in this file can depend on this.
script = "tools/js2c.py"
# The script depends on this other script, this rule causes a rebuild if it
# changes.
source_prereqs = [ "tools/jsmin.py" ]
sources = [
"src/macros.py",
"src/symbol.js",
"src/proxy.js",
"src/collection.js",
"src/weak_collection.js",
"src/promise.js",
"src/generator.js",
"src/array-iterator.js",
"src/harmony-string.js",
"src/harmony-array.js",
"src/harmony-math.js",
]
outputs = [
"$target_gen_dir/experimental-libraries.cc"
]
args =
rebase_path(outputs, root_build_dir) +
[ "CORE", v8_compress_startup_data ] +
rebase_path(sources, root_build_dir)
}
action("postmortem-metadata") {
visibility = ":*" # Only targets in this file can depend on this.
script = "tools/gen-postmortem-metadata.py"
sources = [
"src/objects.h",
"src/objects-inl.h",
]
outputs = [
"$target_gen_dir/debug-support.cc"
]
args =
rebase_path(outputs, root_build_dir) +
rebase_path(sources, root_build_dir)
}
###############################################################################
# Source Sets (aka static libraries)
#
source_set("v8_nosnapshot") {
visibility = ":*" # Only targets in this file can depend on this.
deps = [
":js2c",
":js2c_experimental",
":generate_trig_table",
":v8_base",
]
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/trig-table.cc",
"src/snapshot-empty.cc",
]
configs += [ ":internal_config", ":features" ]
}
source_set("v8_base") {
visibility = ":*" # Only targets in this file can depend on this.
sources = [
"src/accessors.cc",
"src/accessors.h",
"src/allocation.cc",
"src/allocation.h",
"src/allocation-site-scopes.cc",
"src/allocation-site-scopes.h",
"src/allocation-tracker.cc",
"src/allocation-tracker.h",
"src/api.cc",
"src/api.h",
"src/arguments.cc",
"src/arguments.h",
"src/assembler.cc",
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
"src/ast.cc",
"src/ast.h",
"src/atomicops.h",
"src/atomicops_internals_x86_gcc.cc",
"src/bignum-dtoa.cc",
"src/bignum-dtoa.h",
"src/bignum.cc",
"src/bignum.h",
"src/bootstrapper.cc",
"src/bootstrapper.h",
"src/builtins.cc",
"src/builtins.h",
"src/bytecodes-irregexp.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/char-predicates-inl.h",
"src/char-predicates.h",
"src/checks.cc",
"src/checks.h",
"src/circular-queue-inl.h",
"src/circular-queue.h",
"src/code-stubs.cc",
"src/code-stubs.h",
"src/code-stubs-hydrogen.cc",
"src/code.h",
"src/codegen.cc",
"src/codegen.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compiler.cc",
"src/compiler.h",
"src/contexts.cc",
"src/contexts.h",
"src/conversions-inl.h",
"src/conversions.cc",
"src/conversions.h",
"src/counters.cc",
"src/counters.h",
"src/cpu-profiler-inl.h",
"src/cpu-profiler.cc",
"src/cpu-profiler.h",
"src/cpu.cc",
"src/cpu.h",
"src/data-flow.cc",
"src/data-flow.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
"src/dateparser.cc",
"src/dateparser.h",
"src/debug-agent.cc",
"src/debug-agent.h",
"src/debug.cc",
"src/debug.h",
"src/deoptimizer.cc",
"src/deoptimizer.h",
"src/disasm.h",
"src/disassembler.cc",
"src/disassembler.h",
"src/diy-fp.cc",
"src/diy-fp.h",
"src/double.h",
"src/dtoa.cc",
"src/dtoa.h",
"src/effects.h",
"src/elements-kind.cc",
"src/elements-kind.h",
"src/elements.cc",
"src/elements.h",
"src/execution.cc",
"src/execution.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
"src/extensions/free-buffer-extension.h",
"src/extensions/gc-extension.cc",
"src/extensions/gc-extension.h",
"src/extensions/statistics-extension.cc",
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.cc",
"src/extensions/trigger-failure-extension.h",
"src/factory.cc",
"src/factory.h",
"src/fast-dtoa.cc",
"src/fast-dtoa.h",
"src/feedback-slots.h",
"src/fixed-dtoa.cc",
"src/fixed-dtoa.h",
"src/flag-definitions.h",
"src/flags.cc",
"src/flags.h",
"src/frames-inl.h",
"src/frames.cc",
"src/frames.h",
"src/full-codegen.cc",
"src/full-codegen.h",
"src/func-name-inferrer.cc",
"src/func-name-inferrer.h",
"src/gdb-jit.cc",
"src/gdb-jit.h",
"src/global-handles.cc",
"src/global-handles.h",
"src/globals.h",
"src/handles-inl.h",
"src/handles.cc",
"src/handles.h",
"src/hashmap.h",
"src/heap-inl.h",
"src/heap-profiler.cc",
"src/heap-profiler.h",
"src/heap-snapshot-generator-inl.h",
"src/heap-snapshot-generator.cc",
"src/heap-snapshot-generator.h",
"src/heap.cc",
"src/heap.h",
"src/hydrogen-alias-analysis.h",
"src/hydrogen-bce.cc",
"src/hydrogen-bce.h",
"src/hydrogen-bch.cc",
"src/hydrogen-bch.h",
"src/hydrogen-canonicalize.cc",
"src/hydrogen-canonicalize.h",
"src/hydrogen-check-elimination.cc",
"src/hydrogen-check-elimination.h",
"src/hydrogen-dce.cc",
"src/hydrogen-dce.h",
"src/hydrogen-dehoist.cc",
"src/hydrogen-dehoist.h",
"src/hydrogen-environment-liveness.cc",
"src/hydrogen-environment-liveness.h",
"src/hydrogen-escape-analysis.cc",
"src/hydrogen-escape-analysis.h",
"src/hydrogen-flow-engine.h",
"src/hydrogen-instructions.cc",
"src/hydrogen-instructions.h",
"src/hydrogen.cc",
"src/hydrogen.h",
"src/hydrogen-gvn.cc",
"src/hydrogen-gvn.h",
"src/hydrogen-infer-representation.cc",
"src/hydrogen-infer-representation.h",
"src/hydrogen-infer-types.cc",
"src/hydrogen-infer-types.h",
"src/hydrogen-load-elimination.cc",
"src/hydrogen-load-elimination.h",
"src/hydrogen-mark-deoptimize.cc",
"src/hydrogen-mark-deoptimize.h",
"src/hydrogen-mark-unreachable.cc",
"src/hydrogen-mark-unreachable.h",
"src/hydrogen-osr.cc",
"src/hydrogen-osr.h",
"src/hydrogen-range-analysis.cc",
"src/hydrogen-range-analysis.h",
"src/hydrogen-redundant-phi.cc",
"src/hydrogen-redundant-phi.h",
"src/hydrogen-removable-simulates.cc",
"src/hydrogen-removable-simulates.h",
"src/hydrogen-representation-changes.cc",
"src/hydrogen-representation-changes.h",
"src/hydrogen-sce.cc",
"src/hydrogen-sce.h",
"src/hydrogen-store-elimination.cc",
"src/hydrogen-store-elimination.h",
"src/hydrogen-uint32-analysis.cc",
"src/hydrogen-uint32-analysis.h",
"src/i18n.cc",
"src/i18n.h",
"src/icu_util.cc",
"src/icu_util.h",
"src/ic-inl.h",
"src/ic.cc",
"src/ic.h",
"src/incremental-marking.cc",
"src/incremental-marking.h",
"src/interface.cc",
"src/interface.h",
"src/interpreter-irregexp.cc",
"src/interpreter-irregexp.h",
"src/isolate.cc",
"src/isolate.h",
"src/json-parser.h",
"src/json-stringifier.h",
"src/jsregexp-inl.h",
"src/jsregexp.cc",
"src/jsregexp.h",
"src/lazy-instance.h",
# TODO(jochen): move libplatform/ files to their own target.
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/task-queue.cc",
"src/libplatform/task-queue.h",
"src/libplatform/worker-thread.cc",
"src/libplatform/worker-thread.h",
"src/list-inl.h",
"src/list.h",
"src/lithium-allocator-inl.h",
"src/lithium-allocator.cc",
"src/lithium-allocator.h",
"src/lithium-codegen.cc",
"src/lithium-codegen.h",
"src/lithium.cc",
"src/lithium.h",
"src/liveedit.cc",
"src/liveedit.h",
"src/log-inl.h",
"src/log-utils.cc",
"src/log-utils.h",
"src/log.cc",
"src/log.h",
"src/macro-assembler.h",
"src/mark-compact.cc",
"src/mark-compact.h",
"src/messages.cc",
"src/messages.h",
"src/msan.h",
"src/natives.h",
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
"src/objects-visiting.cc",
"src/objects-visiting.h",
"src/objects.cc",
"src/objects.h",
"src/once.cc",
"src/once.h",
"src/optimizing-compiler-thread.h",
"src/optimizing-compiler-thread.cc",
"src/parser.cc",
"src/parser.h",
"src/platform/elapsed-timer.h",
"src/platform/time.cc",
"src/platform/time.h",
"src/platform.h",
"src/platform/condition-variable.cc",
"src/platform/condition-variable.h",
"src/platform/mutex.cc",
"src/platform/mutex.h",
"src/platform/semaphore.cc",
"src/platform/semaphore.h",
"src/platform/socket.cc",
"src/platform/socket.h",
"src/preparse-data-format.h",
"src/preparse-data.cc",
"src/preparse-data.h",
"src/preparser.cc",
"src/preparser.h",
"src/prettyprinter.cc",
"src/prettyprinter.h",
"src/profile-generator-inl.h",
"src/profile-generator.cc",
"src/profile-generator.h",
"src/property-details.h",
"src/property.cc",
"src/property.h",
"src/regexp-macro-assembler-irregexp-inl.h",
"src/regexp-macro-assembler-irregexp.cc",
"src/regexp-macro-assembler-irregexp.h",
"src/regexp-macro-assembler-tracer.cc",
"src/regexp-macro-assembler-tracer.h",
"src/regexp-macro-assembler.cc",
"src/regexp-macro-assembler.h",
"src/regexp-stack.cc",
"src/regexp-stack.h",
"src/rewriter.cc",
"src/rewriter.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime.cc",
"src/runtime.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
"src/sampler.cc",
"src/sampler.h",
"src/scanner-character-streams.cc",
"src/scanner-character-streams.h",
"src/scanner.cc",
"src/scanner.h",
"src/scopeinfo.cc",
"src/scopeinfo.h",
"src/scopes.cc",
"src/scopes.h",
"src/serialize.cc",
"src/serialize.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
"src/snapshot-common.cc",
"src/snapshot.h",
"src/spaces-inl.h",
"src/spaces.cc",
"src/spaces.h",
"src/store-buffer-inl.h",
"src/store-buffer.cc",
"src/store-buffer.h",
"src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
"src/strtod.cc",
"src/strtod.h",
"src/stub-cache.cc",
"src/stub-cache.h",
"src/sweeper-thread.h",
"src/sweeper-thread.cc",
"src/token.cc",
"src/token.h",
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
"src/type-info.cc",
"src/type-info.h",
"src/types-inl.h",
"src/types.cc",
"src/types.h",
"src/typing.cc",
"src/typing.h",
"src/unbound-queue-inl.h",
"src/unbound-queue.h",
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
"src/unique.h",
"src/uri.h",
"src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
"src/utils/random-number-generator.cc",
"src/utils/random-number-generator.h",
"src/v8.cc",
"src/v8.h",
"src/v8checks.h",
"src/v8globals.h",
"src/v8memory.h",
"src/v8threads.cc",
"src/v8threads.h",
"src/variables.cc",
"src/variables.h",
"src/version.cc",
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/zone-inl.h",
"src/zone.cc",
"src/zone.h",
]
if (cpu_arch == "x86") {
sources += [
"src/ia32/assembler-ia32-inl.h",
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
"src/ia32/builtins-ia32.cc",
"src/ia32/code-stubs-ia32.cc",
"src/ia32/code-stubs-ia32.h",
"src/ia32/codegen-ia32.cc",
"src/ia32/codegen-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/debug-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
"src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h",
"src/ia32/full-codegen-ia32.cc",
"src/ia32/ic-ia32.cc",
"src/ia32/lithium-codegen-ia32.cc",
"src/ia32/lithium-codegen-ia32.h",
"src/ia32/lithium-gap-resolver-ia32.cc",
"src/ia32/lithium-gap-resolver-ia32.h",
"src/ia32/lithium-ia32.cc",
"src/ia32/lithium-ia32.h",
"src/ia32/macro-assembler-ia32.cc",
"src/ia32/macro-assembler-ia32.h",
"src/ia32/regexp-macro-assembler-ia32.cc",
"src/ia32/regexp-macro-assembler-ia32.h",
"src/ia32/stub-cache-ia32.cc",
]
} else if (cpu_arch == "x64") {
sources += [
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
"src/x64/builtins-x64.cc",
"src/x64/code-stubs-x64.cc",
"src/x64/code-stubs-x64.h",
"src/x64/codegen-x64.cc",
"src/x64/codegen-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/debug-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
"src/x64/frames-x64.cc",
"src/x64/frames-x64.h",
"src/x64/full-codegen-x64.cc",
"src/x64/ic-x64.cc",
"src/x64/lithium-codegen-x64.cc",
"src/x64/lithium-codegen-x64.h",
"src/x64/lithium-gap-resolver-x64.cc",
"src/x64/lithium-gap-resolver-x64.h",
"src/x64/lithium-x64.cc",
"src/x64/lithium-x64.h",
"src/x64/macro-assembler-x64.cc",
"src/x64/macro-assembler-x64.h",
"src/x64/regexp-macro-assembler-x64.cc",
"src/x64/regexp-macro-assembler-x64.h",
"src/x64/stub-cache-x64.cc",
]
} else if (cpu_arch == "arm") {
sources += [
"src/arm/assembler-arm-inl.h",
"src/arm/assembler-arm.cc",
"src/arm/assembler-arm.h",
"src/arm/builtins-arm.cc",
"src/arm/code-stubs-arm.cc",
"src/arm/code-stubs-arm.h",
"src/arm/codegen-arm.cc",
"src/arm/codegen-arm.h",
"src/arm/constants-arm.h",
"src/arm/constants-arm.cc",
"src/arm/cpu-arm.cc",
"src/arm/debug-arm.cc",
"src/arm/deoptimizer-arm.cc",
"src/arm/disasm-arm.cc",
"src/arm/frames-arm.cc",
"src/arm/frames-arm.h",
"src/arm/full-codegen-arm.cc",
"src/arm/ic-arm.cc",
"src/arm/lithium-arm.cc",
"src/arm/lithium-arm.h",
"src/arm/lithium-codegen-arm.cc",
"src/arm/lithium-codegen-arm.h",
"src/arm/lithium-gap-resolver-arm.cc",
"src/arm/lithium-gap-resolver-arm.h",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
"src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
"src/arm/stub-cache-arm.cc",
]
} else if (cpu_arch == "arm64") {
sources += [
"src/arm64/assembler-arm64.cc",
"src/arm64/assembler-arm64.h",
"src/arm64/assembler-arm64-inl.h",
"src/arm64/builtins-arm64.cc",
"src/arm64/codegen-arm64.cc",
"src/arm64/codegen-arm64.h",
"src/arm64/code-stubs-arm64.cc",
"src/arm64/code-stubs-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/cpu-arm64.h",
"src/arm64/debug-arm64.cc",
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/decoder-arm64-inl.h",
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
"src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h",
"src/arm64/full-codegen-arm64.cc",
"src/arm64/ic-arm64.cc",
"src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc",
"src/arm64/instrument-arm64.h",
"src/arm64/lithium-arm64.cc",
"src/arm64/lithium-arm64.h",
"src/arm64/lithium-codegen-arm64.cc",
"src/arm64/lithium-codegen-arm64.h",
"src/arm64/lithium-gap-resolver-arm64.cc",
"src/arm64/lithium-gap-resolver-arm64.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/regexp-macro-assembler-arm64.cc",
"src/arm64/regexp-macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/stub-cache-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
]
} else if (cpu_arch == "mipsel") {
sources += [
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/assembler-mips-inl.h",
"src/mips/builtins-mips.cc",
"src/mips/codegen-mips.cc",
"src/mips/codegen-mips.h",
"src/mips/code-stubs-mips.cc",
"src/mips/code-stubs-mips.h",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
"src/mips/debug-mips.cc",
"src/mips/deoptimizer-mips.cc",
"src/mips/disasm-mips.cc",
"src/mips/frames-mips.cc",
"src/mips/frames-mips.h",
"src/mips/full-codegen-mips.cc",
"src/mips/ic-mips.cc",
"src/mips/lithium-codegen-mips.cc",
"src/mips/lithium-codegen-mips.h",
"src/mips/lithium-gap-resolver-mips.cc",
"src/mips/lithium-gap-resolver-mips.h",
"src/mips/lithium-mips.cc",
"src/mips/lithium-mips.h",
"src/mips/macro-assembler-mips.cc",
"src/mips/macro-assembler-mips.h",
"src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc",
"src/mips/stub-cache-mips.cc",
]
}
configs += [ ":internal_config", ":features" ]
defines = []
deps = []
if (is_posix) {
sources += [
"src/platform-posix.cc"
]
}
if (is_linux) {
sources += [
"src/platform-linux.cc"
]
# TODO(brettw)
# 'conditions': [
# ['v8_compress_startup_data=="bz2"', {
# 'libraries': [
# '-lbz2',
# ]
# }],
# ],
libs = [ "rt" ]
} else if (is_android) {
# TODO(brettw) OS=="android" condition from tools/gyp/v8.gyp
} else if (is_mac) {
sources += [ "src/platform-macos,cc" ]
} else if (is_win) {
sources += [
"src/platform-win32.cc",
"src/win32-math.cc",
"src/win32-math.h",
]
defines += [ "_CRT_RAND_S" ] # for rand_s()
libs = [ "winmm.lib", "ws2_32.lib" ]
}
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
} else {
sources -= [
"src/i18n.cc",
"src/i18n.h",
]
}
# TODO(brettw) other conditions from v8.gyp
# TODO(brettw) icu_use_data_file_flag
}
###############################################################################
# Executables
#
# TODO(jochen): Remove this as soon as toolchain.gypi is integrated.
if (build_cpu_arch != cpu_arch) {
executable("mksnapshot") {
sources = [
]
}
} else {
executable("mksnapshot") {
sources = [
"src/mksnapshot.cc",
]
configs += [ ":internal_config", ":features" ]
deps = [
":v8_base",
":v8_nosnapshot",
]
if (v8_compress_startup_data == "bz2") {
libs = [ "bz2" ]
}
}
}

deps/v8/ChangeLog (305)

@@ -1,3 +1,308 @@
2014-05-08: Version 3.26.33
Removed default Isolate (Chromium issue 359977).
Performance and stability improvements on all platforms.
2014-05-07: Version 3.26.32
Performance and stability improvements on all platforms.
2014-05-06: Version 3.26.31
Add a basic gn file for V8.
Performance and stability improvements on all platforms.
2014-05-05: Version 3.26.30
Introduce a microtask suppression scope and move microtask methods to
isolate (Chromium issue 369503).
Re-enable Object.observe and add enforcement for security invariants.
Move cache line size calculation directly into CPU::FlushICache
(Chromium issue 359977).
Generation of our home-grown memmove doesn't depend on serializer state
anymore (Chromium issue 359977).
Fix |RunMicrotasks()| leaking reference to the last context being run
on.
Object.defineProperty shouldn't be a hint that we're constructing a
dictionary (Chromium issue 362870).
Performance and stability improvements on all platforms.
2014-05-01: Version 3.26.29
Added a Isolate* parameter to Serializer::enabled() (Chromium issue
359977).
ES6: Add support for Array.prototype.fill() (issue 3273).
Performance and stability improvements on all platforms.
2014-04-29: Version 3.26.28
PromiseThen should ignore non-function parameters (Chromium issue
347455).
Performance and stability improvements on all platforms.
2014-04-29: Version 3.26.27
Error stack getter should not overwrite itself with a data property
(issue 3294).
Performance and stability improvements on all platforms.
2014-04-28: Version 3.26.26
Expose promise value through promise mirror (issue 3093).
Simplified CPU/CpuFeatures a bit (Chromium issue 359977).
Performance and stability improvements on all platforms.
2014-04-28: Version 3.26.25
Add timestamps to CPU profile samples (Chromium issue 363976).
Expose promise status through promise mirror (issue 3093).
Remove static CallCompletedCallback handlers.
Added an Isolate* field to NoTrackDoubleFieldsForSerializerScope,
PlatformFeatureScope and BinaryOpIC::State (Chromium issue 359977).
Trigger debug event on not yet caught exception in promises (issue
3093).
Unbreak vtunejit=on (issue 3288).
Performance and stability improvements on all platforms.
2014-04-25: Version 3.26.24
MIPS: CodeStubs contain their corresponding Isolate* now. (part 2)
(Chromium issue 359977).
MIPS: CodeStubs contain their corresponding Isolate* now. (part 1)
(Chromium issue 359977).
CodeStubs contain their corresponding Isolate* now. (part 2) (Chromium
issue 359977).
Make DescriptorArray::IsMoreGeneralThan() and DescriptorArray::Merge()
compatible again (Chromium issue 365172).
CodeStubs contain their corresponding Isolate* now. (part 1) (Chromium
issue 359977).
Performance and stability improvements on all platforms.
2014-04-24: Version 3.26.23
Performance and stability improvements on all platforms.
2014-04-23: Version 3.26.22
Disable field type tracking by default (Chromium issue 365172).
Performance and stability improvements on all platforms.
2014-04-23: Version 3.26.21
Context-allocate all parameters in generators (issue 3280).
Simplify v8/Isolate teardown (Chromium issue 359977).
Performance and stability improvements on all platforms.
2014-04-21: Version 3.26.20
ES6: Add support for Map/Set forEach (Chromium issues 1793, 2323).
Performance and stability improvements on all platforms.
2014-04-18: Version 3.26.19
ES6: Add support for Map/Set forEach (Chromium issues 1793, 2323).
Performance and stability improvements on all platforms.
2014-04-17: Version 3.26.18
Removed Isolate::EnterDefaultIsolate (Chromium issue 359977).
Performance and stability improvements on all platforms.
2014-04-16: Version 3.26.17
Clear invalid field maps in PropertyAccessInfo (Chromium issue 363956).
ES6: Add support for Map/Set forEach (Chromium issues 1793, 2323).
Performance and stability improvements on all platforms.
2014-04-16: Version 3.26.16
Removed EnterIsolateIfNeeded and a soon-to-be-useless assertion
(Chromium issue 359977).
Removed GetDefaultIsolate{Debugger,ForLocking,StackGuard} (Chromium
issue 359977).
Performance and stability improvements on all platforms.
2014-04-15: Version 3.26.15
Fix result of LCodeGen::DoWrapReceiver for strict functions and builtins
(Chromium issue 362128).
Performance and stability improvements on all platforms.
2014-04-15: Version 3.26.14
Performance and stability improvements on all platforms.
2014-04-14: Version 3.26.13
Make maps in monomorphic IC stubs weak (issue 2073).
x64: Make sure that the upper half of a 64bit register contains 0 for
int32 values (Chromium issue 360611).
Performance and stability improvements on all platforms.
2014-04-11: Version 3.26.12
Do not use ranges after range analysis (Chromium issue 361608).
Performance and stability improvements on all platforms.
2014-04-10: Version 3.26.11
Performance and stability improvements on all platforms.
2014-04-10: Version 3.26.10
Allow the embedder to pass the virtual memory limit to v8.
Performance and stability improvements on all platforms.
2014-04-09: Version 3.26.9
Fix invalid local property lookup for transitions (Chromium issue
361025).
MIPS: Fixed flooring division by -1 (issue 3259).
Fixed flooring division by -1 on ARM (issue 3259).
Make `String.prototype.contains` throw when passing a regular expression
(issue 3261).
Performance and stability improvements on all platforms.
2014-04-08: Version 3.26.8
Yet another regression test for range analysis (issue 3204).
Performance and stability improvements on all platforms.
2014-04-07: Version 3.26.7
Performance and stability improvements on all platforms.
2014-04-04: Version 3.26.6
Performance and stability improvements on all platforms.
2014-04-03: Version 3.26.5
Performance and stability improvements on all platforms.
2014-04-03: Version 3.26.4
Make stray 'return' an early error.
Show references from weak containers as weak in heap snapshots (Chromium
issue 356590).
Make invalid LHSs that are calls late errors (Chromium issue 358346).
Performance and stability improvements on all platforms.
2014-04-02: Version 3.26.3
Support typed arrays in IsMoreGeneralElementsKindTransition (Chromium
issue 357054).
Remove debugger_auto_break flag.
Store i18n meta data in hidden symbols instead of js accessible
properties (Chromium issue 354967).
Performance and stability improvements on all platforms.
2014-04-01: Version 3.26.2
Performance and stability improvements on all platforms.
2014-04-01: Version 3.26.1
Fix Type::Intersect to skip uninhabited bitsets (Chromium issue 357330).
Fix PrepareKeyedOperand on arm (Chromium issue 358057).
Performance and stability improvements on all platforms.
2014-03-31: Version 3.26.0
Deprecate Start/StopCpuProfiling methods (issue 3213).
Don't crash if we get a timezone change notification on an uninitialized
isolate (Chromium issue 357362).
Performance and stability improvements on all platforms.
2014-03-28: Version 3.25.30

        NativeContext::map_cache reference should be strong in heap snapshots
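The recurring theme in this ChangeLog span is the removal of the implicit default isolate (Chromium issue 359977): embedders must now create and enter a v8::Isolate explicitly before touching the rest of the API. A minimal sketch of the resulting embedder boilerplate against the 3.26-era API (variable names are illustrative, not from this commit):

```cpp
#include <v8.h>

int main() {
  // No implicit default isolate anymore: create and enter one explicitly.
  v8::Isolate* isolate = v8::Isolate::New();
  {
    v8::Isolate::Scope isolate_scope(isolate);  // enter the isolate
    v8::HandleScope handle_scope(isolate);      // scope for local handles
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    v8::Context::Scope context_scope(context);  // enter the context
    // ... compile and run scripts against `context` ...
  }
  isolate->Dispose();  // tear the isolate down when done
  return 0;
}
```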

deps/v8/DEPS (2)

@@ -8,7 +8,7 @@ deps = {
     "http://gyp.googlecode.com/svn/trunk@1831",
   "v8/third_party/icu":
-    "https://src.chromium.org/chrome/trunk/deps/third_party/icu46@258359",
+    "https://src.chromium.org/svn/trunk/deps/third_party/icu46@258359",
 }
 deps_os = {

deps/v8/Makefile (30)

@@ -96,10 +96,6 @@ endif
 ifeq ($(optdebug), on)
   GYPFLAGS += -Dv8_optimized_debug=2
 endif
-# debuggersupport=off
-ifeq ($(debuggersupport), off)
-  GYPFLAGS += -Dv8_enable_debugger_support=0
-endif
 # unalignedaccess=on
 ifeq ($(unalignedaccess), on)
   GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
@@ -140,9 +136,9 @@ endif
 # asan=/path/to/clang++
 ifneq ($(strip $(asan)),)
   GYPFLAGS += -Dasan=1
-  export CXX="$(asan)"
-  export CXX_host="$(asan)"
-  export LINK="$(asan)"
+  export CXX=$(asan)
+  export CXX_host=$(asan)
+  export LINK=$(asan)
   export ASAN_SYMBOLIZER_PATH="$(dir $(asan))llvm-symbolizer"
 endif
@@ -232,7 +228,7 @@ endif
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mipsel
+ARCHES = ia32 x64 arm arm64 mips mipsel
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
@@ -281,10 +277,6 @@ buildbot:
 	$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
 	        builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
-mips mips.release mips.debug:
-	@echo "V8 does not support big-endian MIPS builds at the moment," \
-	      "please use little-endian builds (mipsel)."
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -409,7 +401,7 @@ native.clean:
 	rm -rf $(OUTDIR)/native
 	find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
-clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean
+clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean gtags.clean
 # GYP file generation targets.
 OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
@@ -467,6 +459,18 @@ grokdump: ia32.release
 	@cat $(DUMP_FILE).tmpl > $(DUMP_FILE)
 	@$(OUTDIR)/ia32.release/d8 --dump-heap-constants >> $(DUMP_FILE)
+
+# Support for the GNU GLOBAL Source Code Tag System.
+gtags.files: $(GYPFILES) $(ENVFILE)
+	@find include src test -name '*.h' -o -name '*.cc' -o -name '*.c' > $@
+
+# We need to manually set the stack limit here, to work around bugs in
+# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
+GPATH GRTAGS GSYMS GTAGS: gtags.files $(shell cat gtags.files 2> /dev/null)
+	@bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
+
+gtags.clean:
+	rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
+
 # Dependencies.
 # Remember to keep these in sync with the DEPS file.
 dependencies:

deps/v8/Makefile.android (8)

@@ -47,20 +47,20 @@ else
 endif
 ifeq ($(ARCH), android_arm)
-  DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
+  DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm android_target_platform=14
   DEFINES += arm_neon=0 arm_version=7
   TOOLCHAIN_ARCH = arm-linux-androideabi
   TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
   TOOLCHAIN_VER = 4.6
 else
   ifeq ($(ARCH), android_arm64)
-    DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64
+    DEFINES = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=20
     TOOLCHAIN_ARCH = aarch64-linux-android
     TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
     TOOLCHAIN_VER = 4.8
   else
     ifeq ($(ARCH), android_mipsel)
-      DEFINES = target_arch=mipsel v8_target_arch=mipsel
+      DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
       DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
       TOOLCHAIN_ARCH = mipsel-linux-android
       TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
@@ -68,7 +68,7 @@ else
     else
       ifeq ($(ARCH), android_ia32)
-        DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+        DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
         TOOLCHAIN_ARCH = x86
         TOOLCHAIN_PREFIX = i686-linux-android
         TOOLCHAIN_VER = 4.6

deps/v8/Makefile.nacl (3)

@@ -77,6 +77,9 @@ GYPENV += host_os=${HOST_OS}
 # ICU doesn't support NaCl.
 GYPENV += v8_enable_i18n_support=0

+# Disable strict aliasing - v8 code often relies on undefined behavior of C++.
+GYPENV += v8_no_strict_aliasing=1
+
 NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_BUILDS))
 .SECONDEXPANSION:
 # For some reason the $$(basename $$@) expansion didn't work here...

deps/v8/OWNERS (1)

@@ -11,6 +11,7 @@ machenbach@chromium.org
 marja@chromium.org
 mstarzinger@chromium.org
 mvstanton@chromium.org
+rmcilroy@chromium.org
 rossberg@chromium.org
 svenpanne@chromium.org
 titzer@chromium.org

deps/v8/PRESUBMIT.py (7)

@@ -103,6 +103,13 @@ def CheckChangeOnCommit(input_api, output_api):
 def GetPreferredTryMasters(project, change):
   return {
     'tryserver.v8': {
+      'v8_linux_rel': set(['defaulttests']),
+      'v8_linux_dbg': set(['defaulttests']),
+      'v8_linux_nosnap_rel': set(['defaulttests']),
+      'v8_linux_nosnap_dbg': set(['defaulttests']),
+      'v8_linux64_rel': set(['defaulttests']),
+      'v8_linux_arm_dbg': set(['defaulttests']),
+      'v8_linux_arm64_rel': set(['defaulttests']),
       'v8_mac_rel': set(['defaulttests']),
       'v8_win_rel': set(['defaulttests']),
     },

deps/v8/build/android.gypi (2)

@@ -51,7 +51,7 @@
       'android_stlport_libs': '<(android_stlport)/libs',
     }, {
       'variables': {
-        'android_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
+        'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
         'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
       },
       'android_include': '<(android_sysroot)/usr/include',

deps/v8/build/features.gypi (8)

@@ -31,8 +31,6 @@
   'variables': {
     'v8_compress_startup_data%': 'off',
-    'v8_enable_debugger_support%': 1,
-
     'v8_enable_disassembler%': 0,
     'v8_enable_gdbjit%': 0,
@@ -64,9 +62,6 @@
   },
   'target_defaults': {
     'conditions': [
-      ['v8_enable_debugger_support==1', {
-        'defines': ['ENABLE_DEBUGGER_SUPPORT',],
-      }],
       ['v8_enable_disassembler==1', {
         'defines': ['ENABLE_DISASSEMBLER',],
       }],
@@ -98,7 +93,8 @@
       }],
     ],  # conditions
     'configurations': {
-      'Debug': {
+      'DebugBaseCommon': {
+        'abstract': 1,
        'variables': {
          'v8_enable_extra_checks%': 1,
          'v8_enable_handle_zapping%': 1,

deps/v8/build/gyp_v8 (3)

@@ -158,7 +158,8 @@ if __name__ == '__main__':
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
-  if platform.system() == 'Linux':
+  gyp_generators = os.environ.get('GYP_GENERATORS')
+  if platform.system() == 'Linux' and gyp_generators != 'ninja':
     # Work around for crbug.com/331475.
     for f in glob.glob(os.path.join(v8_root, 'out', 'Makefile.*')):
       os.unlink(f)

deps/v8/build/standalone.gypi (11)

@@ -55,8 +55,8 @@
       '<!(uname -m | sed -e "s/i.86/ia32/;\
                              s/x86_64/x64/;\
                              s/amd64/x64/;\
+                             s/aarch64/arm64/;\
                              s/arm.*/arm/;\
-                             s/aarch64/arm64/;\
                              s/mips.*/mipsel/")',
     }, {
       # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
@@ -135,9 +135,15 @@
     },
     'default_configuration': 'Debug',
     'configurations': {
-      'Debug': {
+      'DebugBaseCommon': {
        'cflags': [ '-g', '-O0' ],
      },
+      'Optdebug': {
+        'inherit_from': [ 'DebugBaseCommon', 'DebugBase2' ],
+      },
+      'Debug': {
+        # Xcode insists on this empty entry.
+      },
       'Release': {
         # Xcode insists on this empty entry.
       },
@@ -321,7 +327,6 @@
         'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
         'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',      # -fvisibility=hidden
         'GCC_THREADSAFE_STATICS': 'NO',           # -fno-threadsafe-statics
-        'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
         'GCC_WARN_NON_VIRTUAL_DESTRUCTOR': 'YES', # -Wnon-virtual-dtor
         # MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
         'MACOSX_DEPLOYMENT_TARGET': '<(mac_deployment_target)',

deps/v8/build/toolchain.gypi (338)

@@ -278,6 +278,57 @@
         'V8_TARGET_ARCH_IA32',
       ],
     }],  # v8_target_arch=="ia32"
+    ['v8_target_arch=="mips"', {
+      'defines': [
+        'V8_TARGET_ARCH_MIPS',
+      ],
+      'variables': {
+        'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
+      },
+      'conditions': [
+        ['mipscompiler=="yes"', {
+          'target_conditions': [
+            ['_toolset=="target"', {
+              'cflags': ['-EB'],
+              'ldflags': ['-EB'],
+              'conditions': [
+                [ 'v8_use_mips_abi_hardfloat=="true"', {
+                  'cflags': ['-mhard-float'],
+                  'ldflags': ['-mhard-float'],
+                }, {
+                  'cflags': ['-msoft-float'],
+                  'ldflags': ['-msoft-float'],
+                }],
+                ['mips_arch_variant=="mips32r2"', {
+                  'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                }],
+                ['mips_arch_variant=="mips32r1"', {
+                  'cflags': ['-mips32', '-Wa,-mips32'],
+                }],
+              ],
+            }],
+          ],
+        }],
+        [ 'v8_can_use_fpu_instructions=="true"', {
+          'defines': [
+            'CAN_USE_FPU_INSTRUCTIONS',
+          ],
+        }],
+        [ 'v8_use_mips_abi_hardfloat=="true"', {
+          'defines': [
+            '__mips_hard_float=1',
+            'CAN_USE_FPU_INSTRUCTIONS',
+          ],
+        }, {
+          'defines': [
+            '__mips_soft_float=1'
+          ],
+        }],
+        ['mips_arch_variant=="mips32r2"', {
+          'defines': ['_MIPS_ARCH_MIPS32R2',],
+        }],
+      ],
+    }],  # v8_target_arch=="mips"
     ['v8_target_arch=="mipsel"', {
       'defines': [
         'V8_TARGET_ARCH_MIPS',
@@ -380,7 +431,7 @@
     ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
        or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
       (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
-      v8_target_arch=="mipsel")', {
+      v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
       # Check whether the host compiler and target compiler support the
       # '-m32' option and set it if so.
       'target_conditions': [
@@ -445,138 +496,154 @@
     }],
   ],  # conditions
   'configurations': {
-    'Debug': {
-      'defines': [
-        'ENABLE_DISASSEMBLER',
-        'V8_ENABLE_CHECKS',
-        'OBJECT_PRINT',
-        'VERIFY_HEAP',
-        'DEBUG'
-      ],
+    # Abstract configuration for v8_optimized_debug == 0.
+    'DebugBase0': {
+      'abstract': 1,
      'msvs_settings': {
        'VCCLCompilerTool': {
+          'Optimization': '0',
          'conditions': [
-            ['v8_optimized_debug==0', {
-              'Optimization': '0',
-              'conditions': [
-                ['component=="shared_library"', {
-                  'RuntimeLibrary': '3',  # /MDd
-                }, {
-                  'RuntimeLibrary': '1',  # /MTd
-                }],
-              ],
-            }],
-            ['v8_optimized_debug==1', {
-              'Optimization': '1',
-              'InlineFunctionExpansion': '2',
-              'EnableIntrinsicFunctions': 'true',
-              'FavorSizeOrSpeed': '0',
-              'StringPooling': 'true',
-              'BasicRuntimeChecks': '0',
-              'conditions': [
-                ['component=="shared_library"', {
-                  'RuntimeLibrary': '3',  # /MDd
-                }, {
-                  'RuntimeLibrary': '1',  # /MTd
-                }],
-              ],
-            }],
-            ['v8_optimized_debug==2', {
-              'Optimization': '2',
-              'InlineFunctionExpansion': '2',
-              'EnableIntrinsicFunctions': 'true',
-              'FavorSizeOrSpeed': '0',
-              'StringPooling': 'true',
-              'BasicRuntimeChecks': '0',
-              'conditions': [
-                ['component=="shared_library"', {
-                  'RuntimeLibrary': '3',  #/MDd
-                }, {
-                  'RuntimeLibrary': '1',  #/MTd
-                }],
-                ['v8_target_arch=="x64"', {
-                  # TODO(2207): remove this option once the bug is fixed.
-                  'WholeProgramOptimization': 'true',
-                }],
-              ],
+            ['component=="shared_library"', {
+              'RuntimeLibrary': '3',  # /MDd
+            }, {
+              'RuntimeLibrary': '1',  # /MTd
             }],
          ],
        },
        'VCLinkerTool': {
+          'LinkIncremental': '2',
+        },
+      },
+      'conditions': [
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
+          OS=="qnx"', {
+          'cflags!': [
+            '-O0',
+            '-O3',
+            '-O2',
+            '-O1',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+          ],
+        }],
+        ['OS=="mac"', {
+          'xcode_settings': {
+            'GCC_OPTIMIZATION_LEVEL': '0',  # -O0
+          },
+        }],
+      ],
+    },  # DebugBase0
+    # Abstract configuration for v8_optimized_debug == 1.
+    'DebugBase1': {
+      'abstract': 1,
+      'msvs_settings': {
+        'VCCLCompilerTool': {
+          'Optimization': '1',
+          'InlineFunctionExpansion': '2',
+          'EnableIntrinsicFunctions': 'true',
+          'FavorSizeOrSpeed': '0',
+          'StringPooling': 'true',
+          'BasicRuntimeChecks': '0',
          'conditions': [
-            ['v8_optimized_debug==0', {
-              'LinkIncremental': '2',
-            }],
-            ['v8_optimized_debug==1', {
-              'LinkIncremental': '2',
-            }],
-            ['v8_optimized_debug==2', {
-              'LinkIncremental': '1',
-              'OptimizeReferences': '2',
-              'EnableCOMDATFolding': '2',
+            ['component=="shared_library"', {
+              'RuntimeLibrary': '3',  # /MDd
+            }, {
+              'RuntimeLibrary': '1',  # /MTd
            }],
          ],
        },
+        'VCLinkerTool': {
+          'LinkIncremental': '2',
+        },
      },
      'conditions': [
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
          OS=="qnx"', {
-          'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
-                      '-Wnon-virtual-dtor', '-Woverloaded-virtual',
-                      '<(wno_array_bounds)',
-          ],
+          'cflags!': [
+            '-O0',
+            '-O3', # TODO(2807) should be -O1.
+            '-O2',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+            '-O1', # TODO(2807) should be -O3.
+          ],
          'conditions': [
-            ['v8_optimized_debug==0', {
-              'cflags!': [
-                '-O0',
-                '-O3',
-                '-O2',
-                '-O1',
-                '-Os',
-              ],
-              'cflags': [
-                '-fdata-sections',
-                '-ffunction-sections',
-              ],
-            }],
-            ['v8_optimized_debug==1', {
-              'cflags!': [
-                '-O0',
-                '-O3', # TODO(2807) should be -O1.
-                '-O2',
-                '-Os',
-              ],
-              'cflags': [
-                '-fdata-sections',
-                '-ffunction-sections',
-                '-O1', # TODO(2807) should be -O3.
-              ],
-            }],
-            ['v8_optimized_debug==2', {
-              'cflags!': [
-                '-O0',
-                '-O1',
-                '-Os',
-              ],
-              'cflags': [
-                '-fdata-sections',
-                '-ffunction-sections',
-              ],
-              'defines': [
-                'OPTIMIZED_DEBUG'
-              ],
-              'conditions': [
-                # TODO(crbug.com/272548): Avoid -O3 in NaCl
-                ['nacl_target_arch=="none"', {
-                  'cflags': ['-O3'],
-                  'cflags!': ['-O2'],
-                }, {
-                  'cflags': ['-O2'],
-                  'cflags!': ['-O3'],
-                }],
-              ],
-            }],
-            ['v8_optimized_debug!=0 and gcc_version==44 and clang==0', {
+            ['gcc_version==44 and clang==0', {
+              'cflags': [
+                # Avoid crashes with gcc 4.4 in the v8 test suite.
+                '-fno-tree-vrp',
+              ],
+            }],
+          ],
+        }],
+        ['OS=="mac"', {
+          'xcode_settings': {
+            'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
+            'GCC_STRICT_ALIASING': 'YES',
+          },
+        }],
+      ],
+    },  # DebugBase1
+    # Abstract configuration for v8_optimized_debug == 2.
+    'DebugBase2': {
+      'abstract': 1,
+      'msvs_settings': {
+        'VCCLCompilerTool': {
+          'Optimization': '2',
+          'InlineFunctionExpansion': '2',
+          'EnableIntrinsicFunctions': 'true',
+          'FavorSizeOrSpeed': '0',
+          'StringPooling': 'true',
+          'BasicRuntimeChecks': '0',
+          'conditions': [
+            ['component=="shared_library"', {
+              'RuntimeLibrary': '3',  #/MDd
+            }, {
+              'RuntimeLibrary': '1',  #/MTd
+            }],
+            ['v8_target_arch=="x64"', {
+              # TODO(2207): remove this option once the bug is fixed.
+              'WholeProgramOptimization': 'true',
+            }],
+          ],
+        },
+        'VCLinkerTool': {
+          'LinkIncremental': '1',
+          'OptimizeReferences': '2',
+          'EnableCOMDATFolding': '2',
+        },
+      },
+      'conditions': [
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
+          OS=="qnx"', {
+          'cflags!': [
+            '-O0',
+            '-O1',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+          ],
+          'defines': [
+            'OPTIMIZED_DEBUG'
+          ],
+          'conditions': [
+            # TODO(crbug.com/272548): Avoid -O3 in NaCl
+            ['nacl_target_arch=="none"', {
+              'cflags': ['-O3'],
+              'cflags!': ['-O2'],
+            }, {
+              'cflags': ['-O2'],
+              'cflags!': ['-O3'],
+            }],
+            ['gcc_version==44 and clang==0', {
              'cflags': [
                # Avoid crashes with gcc 4.4 in the v8 test suite.
                '-fno-tree-vrp',
@@ -584,6 +651,29 @@
              ],
            }],
          ],
        }],
+        ['OS=="mac"', {
+          'xcode_settings': {
+            'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
+            'GCC_STRICT_ALIASING': 'YES',
+          },
+        }],
+      ],
+    },  # DebugBase2
+    # Common settings for the Debug configuration.
+    'DebugBaseCommon': {
+      'abstract': 1,
+      'defines': [
+        'ENABLE_DISASSEMBLER',
+        'V8_ENABLE_CHECKS',
+        'OBJECT_PRINT',
+        'VERIFY_HEAP',
+        'DEBUG'
+      ],
+      'conditions': [
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
+          OS=="qnx"', {
+          'cflags': [ '-Woverloaded-virtual', '<(wno_array_bounds)', ],
+        }],
        ['OS=="linux" and v8_enable_backtrace==1', {
          # Support for backtrace_symbols.
          'ldflags': [ '-rdynamic' ],
@@ -602,17 +692,19 @@
            }],
          ],
        }],
-        ['OS=="mac"', {
-          'xcode_settings': {
-            'conditions': [
-              ['v8_optimized_debug==0', {
-                'GCC_OPTIMIZATION_LEVEL': '0',  # -O0
-              }, {
-                'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
-                'GCC_STRICT_ALIASING': 'YES',
-              }],
-            ],
-          },
+      ],
+    },  # DebugBaseCommon
+    'Debug': {
+      'inherit_from': ['DebugBaseCommon'],
+      'conditions': [
+        ['v8_optimized_debug==0', {
+          'inherit_from': ['DebugBase0'],
+        }],
+        ['v8_optimized_debug==1', {
+          'inherit_from': ['DebugBase1'],
+        }],
+        ['v8_optimized_debug==2', {
+          'inherit_from': ['DebugBase2'],
        }],
      ],
    },  # Debug

62
deps/v8/include/v8-debug.h

@ -1,29 +1,6 @@
// Copyright 2008 the V8 project authors. All rights reserved. // Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Use of this source code is governed by a BSD-style license that can be
// modification, are permitted provided that the following conditions are // found in the LICENSE file.
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_DEBUG_H_ #ifndef V8_V8_DEBUG_H_
#define V8_V8_DEBUG_H_ #define V8_V8_DEBUG_H_
@@ -190,32 +167,28 @@ class V8_EXPORT Debug {
                                        Handle<Value> data = Handle<Value>());

   // Schedule a debugger break to happen when JavaScript code is run
-  // in the given isolate. If no isolate is provided the default
-  // isolate is used.
-  static void DebugBreak(Isolate* isolate = NULL);
+  // in the given isolate.
+  static void DebugBreak(Isolate* isolate);

   // Remove scheduled debugger break in given isolate if it has not
-  // happened yet. If no isolate is provided the default isolate is
-  // used.
-  static void CancelDebugBreak(Isolate* isolate = NULL);
+  // happened yet.
+  static void CancelDebugBreak(Isolate* isolate);

   // Break execution of JavaScript in the given isolate (this method
   // can be invoked from a non-VM thread) for further client command
   // execution on a VM thread. Client data is then passed in
   // EventDetails to EventCallback2 at the moment when the VM actually
-  // stops. If no isolate is provided the default isolate is used.
-  static void DebugBreakForCommand(ClientData* data = NULL,
-                                   Isolate* isolate = NULL);
+  // stops.
+  static void DebugBreakForCommand(Isolate* isolate, ClientData* data);
+
+  // TODO(svenpanne) Remove this when Chrome is updated.
+  static void DebugBreakForCommand(ClientData* data, Isolate* isolate) {
+    DebugBreakForCommand(isolate, data);
+  }

   // Message based interface. The message protocol is JSON.
   static void SetMessageHandler2(MessageHandler2 handler);

-  // If no isolate is provided the default isolate is
-  // used.
-  // TODO(dcarney): remove
-  static void SendCommand(const uint16_t* command, int length,
-                          ClientData* client_data = NULL,
-                          Isolate* isolate = NULL);
   static void SendCommand(Isolate* isolate,
                           const uint16_t* command, int length,
                           ClientData* client_data = NULL);

@@ -290,7 +263,7 @@ class V8_EXPORT Debug {
    *
    * Generally when message arrives V8 may be in one of 3 states:
    * 1. V8 is running script; V8 will automatically interrupt and process all
-   *    pending messages (however auto_break flag should be enabled);
+   *    pending messages;
    * 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
    *    to reading and processing debug messages;
    * 3. V8 is not running at all or has called some long-working C++ function;

@@ -331,7 +304,12 @@ class V8_EXPORT Debug {
    * (default Isolate if not provided). V8 will abort if LiveEdit is
    * unexpectedly used. LiveEdit is enabled by default.
    */
-  static void SetLiveEditEnabled(bool enable, Isolate* isolate = NULL);
+  static void SetLiveEditEnabled(Isolate* isolate, bool enable);
+
+  // TODO(svenpanne) Remove this when Chrome is updated.
+  static void SetLiveEditEnabled(bool enable, Isolate* isolate) {
+    SetLiveEditEnabled(isolate, enable);
+  }
 };
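Editor's note: the debug entry points above now name the target isolate explicitly; the old NULL-means-default-isolate overloads are removed. A minimal sketch of the updated calls, assuming the embedder already holds a v8::Isolate* (the wrapper functions are illustrative, not from the patch):

#include <v8.h>
#include <v8-debug.h>

// Scheduling and cancelling a break now take the isolate explicitly.
void ScheduleBreak(v8::Isolate* isolate) {
  v8::Debug::DebugBreak(isolate);        // break when JS next runs
}

void AbortScheduledBreak(v8::Isolate* isolate) {
  v8::Debug::CancelDebugBreak(isolate);  // remove it if it has not fired
}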

27
deps/v8/include/v8-platform.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_V8_PLATFORM_H_
 #define V8_V8_PLATFORM_H_

66
deps/v8/include/v8-profiler.h

@@ -1,29 +1,6 @@
 // Copyright 2010 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_V8_PROFILER_H_
 #define V8_V8_PROFILER_H_
@@ -106,27 +83,35 @@ class V8_EXPORT CpuProfile {
   const CpuProfileNode* GetTopDownRoot() const;

   /**
    * Returns number of samples recorded. The samples are not recorded unless
    * |record_samples| parameter of CpuProfiler::StartCpuProfiling is true.
    */
   int GetSamplesCount() const;

   /**
    * Returns profile node corresponding to the top frame the sample at
    * the given index.
    */
   const CpuProfileNode* GetSample(int index) const;

   /**
-   * Returns time when the profile recording started (in microseconds
-   * since the Epoch).
+   * Returns the timestamp of the sample. The timestamp is the number of
+   * microseconds since some unspecified starting point.
+   * The point is equal to the starting point used by GetStartTime.
+   */
+  int64_t GetSampleTimestamp(int index) const;
+
+  /**
+   * Returns time when the profile recording was started (in microseconds)
+   * since some unspecified starting point.
    */
   int64_t GetStartTime() const;

   /**
-   * Returns time when the profile recording was stopped (in microseconds
-   * since the Epoch).
+   * Returns time when the profile recording was stopped (in microseconds)
+   * since some unspecified starting point.
+   * The point is equal to the starting point used by GetStartTime.
    */
   int64_t GetEndTime() const;

@@ -164,7 +149,9 @@ class V8_EXPORT CpuProfiler {
   void StartProfiling(Handle<String> title, bool record_samples = false);

   /** Deprecated. Use StartProfiling instead. */
-  void StartCpuProfiling(Handle<String> title, bool record_samples = false);
+  V8_DEPRECATED("Use StartProfiling",
+                void StartCpuProfiling(Handle<String> title,
+                                       bool record_samples = false));

@@ -173,7 +160,8 @@ class V8_EXPORT CpuProfiler {
   CpuProfile* StopProfiling(Handle<String> title);

   /** Deprecated. Use StopProfiling instead. */
-  const CpuProfile* StopCpuProfiling(Handle<String> title);
+  V8_DEPRECATED("Use StopProfiling",
+                const CpuProfile* StopCpuProfiling(Handle<String> title));

   /**
    * Tells the profiler whether the embedder is idle.
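Editor's note: with per-sample timestamps a profile can be laid out on a timeline. A sketch against the API above, assuming a const v8::CpuProfile* obtained from StopProfiling with record_samples enabled:

#include <cstdio>
#include <v8.h>
#include <v8-profiler.h>

// GetSampleTimestamp and GetStartTime share the same (unspecified) time
// origin, so their difference is a meaningful microsecond offset.
void DumpSamples(const v8::CpuProfile* profile) {
  int64_t start = profile->GetStartTime();
  for (int i = 0; i < profile->GetSamplesCount(); ++i) {
    const v8::CpuProfileNode* node = profile->GetSample(i);
    v8::String::Utf8Value name(node->GetFunctionName());
    printf("%lld us: %s\n",
           static_cast<long long>(profile->GetSampleTimestamp(i) - start),
           *name);
  }
}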

27
deps/v8/include/v8-testing.h

@@ -1,29 +1,6 @@
 // Copyright 2010 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_V8_TEST_H_
 #define V8_V8_TEST_H_

345
deps/v8/include/v8-util.h

@@ -1,35 +1,13 @@
 // Copyright 2014 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_UTIL_H_
 #define V8_UTIL_H_

 #include "v8.h"
 #include <map>
+#include <vector>

 /**
  * Support for Persistent containers.
@@ -42,6 +20,10 @@ namespace v8 {
 typedef uintptr_t PersistentContainerValue;
 static const uintptr_t kPersistentContainerNotFound = 0;

+enum PersistentContainerCallbackType {
+  kNotWeak,
+  kWeak
+};

 /**
@@ -92,38 +74,34 @@ class StdMapTraits {
 /**
  * A default trait implementation for PersistentValueMap, which inherits
  * a std:map backing map from StdMapTraits and holds non-weak persistent
- * objects.
+ * objects and has no special Dispose handling.
  *
- * Users have to implement their own dispose trait.
+ * You should not derive from this class, since MapType depends on the
+ * surrounding class, and hence a subclass cannot simply inherit the methods.
  */
 template<typename K, typename V>
-class StrongMapTraits : public StdMapTraits<K, V> {
+class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
  public:
   // Weak callback & friends:
-  static const bool kIsWeak = false;
-  typedef typename StdMapTraits<K, V>::Impl Impl;
+  static const PersistentContainerCallbackType kCallbackType = kNotWeak;
+  typedef PersistentValueMap<K, V, DefaultPersistentValueMapTraits<K, V> >
+      MapType;
   typedef void WeakCallbackDataType;
   static WeakCallbackDataType* WeakCallbackParameter(
-      Impl* impl, const K& key, Local<V> value);
-  static Impl* ImplFromWeakCallbackData(
-      const WeakCallbackData<V, WeakCallbackDataType>& data);
+      MapType* map, const K& key, Local<V> value) {
+    return NULL;
+  }
+  static MapType* MapFromWeakCallbackData(
+      const WeakCallbackData<V, WeakCallbackDataType>& data) {
+    return NULL;
+  }
   static K KeyFromWeakCallbackData(
-      const WeakCallbackData<V, WeakCallbackDataType>& data);
-  static void DisposeCallbackData(WeakCallbackDataType* data);
-};
+      const WeakCallbackData<V, WeakCallbackDataType>& data) {
+    return K();
+  }
+  static void DisposeCallbackData(WeakCallbackDataType* data) { }
+  static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) { }
+};

-/**
- * A default trait implementation for PersistentValueMap, with a std::map
- * backing map, non-weak persistents as values, and no special dispose
- * handling. Can be used as-is.
- */
-template<typename K, typename V>
-class DefaultPersistentValueMapTraits : public StrongMapTraits<K, V> {
- public:
-  typedef typename StrongMapTraits<K, V>::Impl Impl;
-  static void Dispose(Isolate* isolate, UniquePersistent<V> value,
-      Impl* impl, K key) { }
-};
@@ -140,55 +118,49 @@ class DefaultPersistentValueMapTraits : public StrongMapTraits<K, V> {
 template<typename K, typename V, typename Traits>
 class PersistentValueMap {
  public:
-  V8_INLINE explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {}
-  V8_INLINE ~PersistentValueMap() { Clear(); }
+  explicit PersistentValueMap(Isolate* isolate) : isolate_(isolate) {}
+  ~PersistentValueMap() { Clear(); }

-  V8_INLINE Isolate* GetIsolate() { return isolate_; }
+  Isolate* GetIsolate() { return isolate_; }

   /**
    * Return size of the map.
    */
-  V8_INLINE size_t Size() { return Traits::Size(&impl_); }
+  size_t Size() { return Traits::Size(&impl_); }

   /**
    * Return whether the map holds weak persistents.
    */
-  V8_INLINE bool IsWeak() { return Traits::kIsWeak; }
+  bool IsWeak() { return Traits::kCallbackType != kNotWeak; }

   /**
    * Get value stored in map.
    */
-  V8_INLINE Local<V> Get(const K& key) {
+  Local<V> Get(const K& key) {
     return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, key)));
   }

   /**
    * Check whether a value is contained in the map.
    */
-  V8_INLINE bool Contains(const K& key) {
-    return Traits::Get(&impl_, key) != 0;
+  bool Contains(const K& key) {
+    return Traits::Get(&impl_, key) != kPersistentContainerNotFound;
   }

   /**
    * Get value stored in map and set it in returnValue.
    * Return true if a value was found.
    */
-  V8_INLINE bool SetReturnValue(const K& key,
-      ReturnValue<Value>& returnValue) {
-    PersistentContainerValue value = Traits::Get(&impl_, key);
-    bool hasValue = value != 0;
-    if (hasValue) {
-      returnValue.SetInternal(
-          *reinterpret_cast<internal::Object**>(FromVal(value)));
-    }
-    return hasValue;
+  bool SetReturnValue(const K& key,
+      ReturnValue<Value> returnValue) {
+    return SetReturnValueFromVal(returnValue, Traits::Get(&impl_, key));
   }

   /**
    * Call Isolate::SetReference with the given parent and the map value.
    */
-  V8_INLINE void SetReference(const K& key,
+  void SetReference(const K& key,
       const Persistent<Object>& parent) {
     GetIsolate()->SetReference(
         reinterpret_cast<internal::Object**>(parent.val_),
@@ -215,7 +187,7 @@ class PersistentValueMap {
   /**
    * Return value for key and remove it from the map.
    */
-  V8_INLINE UniquePersistent<V> Remove(const K& key) {
+  UniquePersistent<V> Remove(const K& key) {
     return Release(Traits::Remove(&impl_, key)).Pass();
   }
@@ -231,12 +203,76 @@ class PersistentValueMap {
       typename Traits::Impl impl;
       Traits::Swap(impl_, impl);
       for (It i = Traits::Begin(&impl); i != Traits::End(&impl); ++i) {
-        Traits::Dispose(isolate_, Release(Traits::Value(i)).Pass(), &impl,
-                        Traits::Key(i));
+        Traits::Dispose(isolate_, Release(Traits::Value(i)).Pass(),
+                        Traits::Key(i));
       }
     }
   }

+  /**
+   * Helper class for GetReference/SetWithReference. Do not use outside
+   * that context.
+   */
+  class PersistentValueReference {
+   public:
+    PersistentValueReference() : value_(kPersistentContainerNotFound) { }
+    PersistentValueReference(const PersistentValueReference& other)
+        : value_(other.value_) { }
+
+    Local<V> NewLocal(Isolate* isolate) const {
+      return Local<V>::New(isolate, FromVal(value_));
+    }
+    bool IsEmpty() const {
+      return value_ == kPersistentContainerNotFound;
+    }
+    template<typename T>
+    bool SetReturnValue(ReturnValue<T> returnValue) {
+      return SetReturnValueFromVal(returnValue, value_);
+    }
+    void Reset() {
+      value_ = kPersistentContainerNotFound;
+    }
+    void operator=(const PersistentValueReference& other) {
+      value_ = other.value_;
+    }
+
+   private:
+    friend class PersistentValueMap;
+
+    explicit PersistentValueReference(PersistentContainerValue value)
+        : value_(value) { }
+
+    void operator=(PersistentContainerValue value) {
+      value_ = value;
+    }
+
+    PersistentContainerValue value_;
+  };
+
+  /**
+   * Get a reference to a map value. This enables fast, repeated access
+   * to a value stored in the map while the map remains unchanged.
+   *
+   * Careful: This is potentially unsafe, so please use with care.
+   * The value will become invalid if the value for this key changes
+   * in the underlying map, as a result of Set or Remove for the same
+   * key; as a result of the weak callback for the same key; or as a
+   * result of calling Clear() or destruction of the map.
+   */
+  PersistentValueReference GetReference(const K& key) {
+    return PersistentValueReference(Traits::Get(&impl_, key));
+  }
+
+  /**
+   * Put a value into the map and update the reference.
+   * Restrictions of GetReference apply here as well.
+   */
+  UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
+                          PersistentValueReference* reference) {
+    *reference = Leak(&value);
+    return SetUnique(key, &value);
+  }
  private:
   PersistentValueMap(PersistentValueMap&);
   void operator=(PersistentValueMap&);

@@ -246,10 +282,10 @@ class PersistentValueMap {
    * by the Traits class.
    */
   UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
-    if (Traits::kIsWeak) {
+    if (Traits::kCallbackType != kNotWeak) {
       Local<V> value(Local<V>::New(isolate_, *persistent));
       persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
-          Traits::WeakCallbackParameter(&impl_, key, value), WeakCallback);
+          Traits::WeakCallbackParameter(this, key, value), WeakCallback);
     }
     PersistentContainerValue old_value =
         Traits::Set(&impl_, key, ClearAndLeak(persistent));

@@ -258,34 +294,50 @@ class PersistentValueMap {
   static void WeakCallback(
       const WeakCallbackData<V, typename Traits::WeakCallbackDataType>& data) {
-    if (Traits::kIsWeak) {
-      typename Traits::Impl* impl = Traits::ImplFromWeakCallbackData(data);
+    if (Traits::kCallbackType != kNotWeak) {
+      PersistentValueMap<K, V, Traits>* persistentValueMap =
+          Traits::MapFromWeakCallbackData(data);
       K key = Traits::KeyFromWeakCallbackData(data);
-      PersistentContainerValue value = Traits::Remove(impl, key);
-      Traits::Dispose(data.GetIsolate(), Release(value).Pass(), impl, key);
+      Traits::Dispose(data.GetIsolate(),
+                      persistentValueMap->Remove(key).Pass(), key);
     }
   }

-  V8_INLINE static V* FromVal(PersistentContainerValue v) {
+  static V* FromVal(PersistentContainerValue v) {
     return reinterpret_cast<V*>(v);
   }

-  V8_INLINE static PersistentContainerValue ClearAndLeak(
+  static bool SetReturnValueFromVal(
+      ReturnValue<Value>& returnValue, PersistentContainerValue value) {
+    bool hasValue = value != kPersistentContainerNotFound;
+    if (hasValue) {
+      returnValue.SetInternal(
+          *reinterpret_cast<internal::Object**>(FromVal(value)));
+    }
+    return hasValue;
+  }
+
+  static PersistentContainerValue ClearAndLeak(
       UniquePersistent<V>* persistent) {
     V* v = persistent->val_;
     persistent->val_ = 0;
     return reinterpret_cast<PersistentContainerValue>(v);
   }

+  static PersistentContainerValue Leak(
+      UniquePersistent<V>* persistent) {
+    return reinterpret_cast<PersistentContainerValue>(persistent->val_);
+  }
+
   /**
    * Return a container value as UniquePersistent and make sure the weak
    * callback is properly disposed of. All remove functionality should go
    * through this.
    */
-  V8_INLINE static UniquePersistent<V> Release(PersistentContainerValue v) {
+  static UniquePersistent<V> Release(PersistentContainerValue v) {
     UniquePersistent<V> p;
     p.val_ = FromVal(v);
-    if (Traits::kIsWeak && !p.IsEmpty()) {
+    if (Traits::kCallbackType != kNotWeak && !p.IsEmpty()) {
       Traits::DisposeCallbackData(
           p.template ClearWeak<typename Traits::WeakCallbackDataType>());
     }
@@ -313,42 +365,121 @@ class StdPersistentValueMap : public PersistentValueMap<K, V, Traits> {
 };

+class DefaultPersistentValueVectorTraits {
+ public:
+  typedef std::vector<PersistentContainerValue> Impl;
+
+  static void Append(Impl* impl, PersistentContainerValue value) {
+    impl->push_back(value);
+  }
+  static bool IsEmpty(const Impl* impl) {
+    return impl->empty();
+  }
+  static size_t Size(const Impl* impl) {
+    return impl->size();
+  }
+  static PersistentContainerValue Get(const Impl* impl, size_t i) {
+    return (i < impl->size()) ? impl->at(i) : kPersistentContainerNotFound;
+  }
+  static void ReserveCapacity(Impl* impl, size_t capacity) {
+    impl->reserve(capacity);
+  }
+  static void Clear(Impl* impl) {
+    impl->clear();
+  }
+};

-/**
- * Empty default implementations for StrongTraits methods.
- *
- * These should not be necessary, since they're only used in code that
- * is surrounded by if(Traits::kIsWeak), which for StrongMapTraits is
- * compile-time false. Most compilers can live without them; however
- * the compiler we use from 64-bit Win differs.
- *
- * TODO(vogelheim): Remove these once they're no longer necessary.
- */
-template<typename K, typename V>
-typename StrongMapTraits<K, V>::WeakCallbackDataType*
-    StrongMapTraits<K, V>::WeakCallbackParameter(
-        Impl* impl, const K& key, Local<V> value) {
-  return NULL;
-}
-
-template<typename K, typename V>
-typename StrongMapTraits<K, V>::Impl*
-    StrongMapTraits<K, V>::ImplFromWeakCallbackData(
-        const WeakCallbackData<V, WeakCallbackDataType>& data) {
-  return NULL;
-}
-
-template<typename K, typename V>
-K StrongMapTraits<K, V>::KeyFromWeakCallbackData(
-    const WeakCallbackData<V, WeakCallbackDataType>& data) {
-  return K();
-}
-
-template<typename K, typename V>
-void StrongMapTraits<K, V>::DisposeCallbackData(WeakCallbackDataType* data) {
-}
+/**
+ * A vector wrapper that safely stores UniquePersistent values.
+ * C++11 embedders don't need this class, as they can use UniquePersistent
+ * directly in std containers.
+ *
+ * This class relies on a backing vector implementation, whose type and methods
+ * are described by the Traits class. The backing map will handle values of type
+ * PersistentContainerValue, with all conversion into and out of V8
+ * handles being transparently handled by this class.
+ */
+template<typename V, typename Traits = DefaultPersistentValueVectorTraits>
+class PersistentValueVector {
+ public:
+  explicit PersistentValueVector(Isolate* isolate) : isolate_(isolate) { }
+
+  ~PersistentValueVector() {
+    Clear();
+  }
+
+  /**
+   * Append a value to the vector.
+   */
+  void Append(Local<V> value) {
+    UniquePersistent<V> persistent(isolate_, value);
+    Traits::Append(&impl_, ClearAndLeak(&persistent));
+  }
+
+  /**
+   * Append a persistent's value to the vector.
+   */
+  void Append(UniquePersistent<V> persistent) {
+    Traits::Append(&impl_, ClearAndLeak(&persistent));
+  };
+
+  /**
+   * Are there any values in the vector?
+   */
+  bool IsEmpty() const {
+    return Traits::IsEmpty(&impl_);
+  }
+
+  /**
+   * How many elements are in the vector?
+   */
+  size_t Size() const {
+    return Traits::Size(&impl_);
+  }
+
+  /**
+   * Retrieve the i-th value in the vector.
+   */
+  Local<V> Get(size_t index) const {
+    return Local<V>::New(isolate_, FromVal(Traits::Get(&impl_, index)));
+  }
+
+  /**
+   * Remove all elements from the vector.
+   */
+  void Clear() {
+    size_t length = Traits::Size(&impl_);
+    for (size_t i = 0; i < length; i++) {
+      UniquePersistent<V> p;
+      p.val_ = FromVal(Traits::Get(&impl_, i));
+    }
+    Traits::Clear(&impl_);
+  }
+
+  /**
+   * Reserve capacity in the vector.
+   * (Efficiency gains depend on the backing implementation.)
+   */
+  void ReserveCapacity(size_t capacity) {
+    Traits::ReserveCapacity(&impl_, capacity);
+  }
+
+ private:
+  static PersistentContainerValue ClearAndLeak(
+      UniquePersistent<V>* persistent) {
+    V* v = persistent->val_;
+    persistent->val_ = 0;
+    return reinterpret_cast<PersistentContainerValue>(v);
+  }
+
+  static V* FromVal(PersistentContainerValue v) {
+    return reinterpret_cast<V*>(v);
+  }
+
+  Isolate* isolate_;
+  typename Traits::Impl impl_;
+};
 }  // namespace v8
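Editor's note: for embedders, the practical upshot of the v8-util.h rework is that handles can be kept in container wrappers without manual Persistent bookkeeping. A sketch using the new PersistentValueVector alongside StdPersistentValueMap; it assumes an entered isolate and context, and relies on the map's Set(key, Local) overload declared elsewhere in this header:

#include <string>
#include <v8.h>
#include <v8-util.h>

// Keep a keyed object and an ordered list of values alive across GCs.
void RetainHandles(v8::Isolate* isolate,
                   v8::Local<v8::Object> obj,
                   v8::Local<v8::Value> val) {
  v8::StdPersistentValueMap<std::string, v8::Object> map(isolate);
  v8::PersistentValueVector<v8::Value> vec(isolate);
  map.Set("config", obj);   // stored internally as a UniquePersistent
  vec.Append(val);
  if (map.Contains("config")) {
    v8::Local<v8::Object> again = map.Get("config");  // back to a Local
    (void)again;
  }
}  // both containers Clear() their persistents on destruction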

239
deps/v8/include/v8.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 /** \mainpage V8 API Reference Guide
  *

@@ -129,6 +106,7 @@
          class M = NonCopyablePersistentTraits<T> > class Persistent;
 template<class T> class UniquePersistent;
 template<class K, class V, class T> class PersistentValueMap;
+template<class V, class T> class PersistentValueVector;
 template<class T, class P> class WeakCallbackObject;
 class FunctionTemplate;
 class ObjectTemplate;
@@ -315,15 +293,6 @@ template <class T> class Handle {
     return New(isolate, that.val_);
   }

-#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
- private:
-#endif
-  /**
-   * Creates a new handle for the specified value.
-   */
-  V8_INLINE explicit Handle(T* val) : val_(val) {}
-
  private:
   friend class Utils;
   template<class F, class M> friend class Persistent;

@@ -342,6 +311,11 @@ template <class T> class Handle {
   friend class Object;
   friend class Private;

+  /**
+   * Creates a new handle for the specified value.
+   */
+  V8_INLINE explicit Handle(T* val) : val_(val) {}
+
   V8_INLINE static Handle<T> New(Isolate* isolate, T* that);

   T* val_;
@@ -395,12 +369,6 @@ template <class T> class Local : public Handle<T> {
   V8_INLINE static Local<T> New(Isolate* isolate,
                                 const PersistentBase<T>& that);

-#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
- private:
-#endif
-  template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { }
-
  private:
   friend class Utils;
   template<class F> friend class Eternal;

@@ -417,7 +385,9 @@ template <class T> class Local : public Handle<T> {
   friend class HandleScope;
   friend class EscapableHandleScope;
   template<class F1, class F2, class F3> friend class PersistentValueMap;
+  template<class F1, class F2> friend class PersistentValueVector;

+  template <class S> V8_INLINE Local(S* that) : Handle<T>(that) { }
   V8_INLINE static Local<T> New(Isolate* isolate, T* that);
 };
@@ -522,6 +492,13 @@ template <class T> class PersistentBase {
     return !operator==(that);
   }

+  /**
+   * Install a finalization callback on this object.
+   * NOTE: There is no guarantee as to *when* or even *if* the callback is
+   * invoked. The invocation is performed solely on a best effort basis.
+   * As always, GC-based finalization should *not* be relied upon for any
+   * critical form of resource management!
+   */
   template<typename P>
   V8_INLINE void SetWeak(
       P* parameter,
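Editor's note: the new comment makes the contract explicit: weak callbacks are best-effort. A sketch of a finalizer freeing a native wrapper; NativeThing and both functions are hypothetical embedder code, not part of the patch:

struct NativeThing { int payload; };  // hypothetical embedder state

// May run arbitrarily late, or never, per the note above; it must not
// guard critical resources (files, locks, transactions).
void OnWeak(const v8::WeakCallbackData<v8::Object, NativeThing>& data) {
  delete data.GetParameter();
}

void AttachFinalizer(v8::Persistent<v8::Object>& holder, NativeThing* thing) {
  holder.SetWeak(thing, OnWeak);
}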
@@ -586,6 +563,7 @@ template <class T> class PersistentBase {
   template<class F> friend class PersistentBase;
   template<class F> friend class ReturnValue;
   template<class F1, class F2, class F3> friend class PersistentValueMap;
+  template<class F1, class F2> friend class PersistentValueVector;
   friend class Object;

   explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
@@ -719,15 +697,6 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
   // This will be removed.
   V8_INLINE T* ClearAndLeak();

-  // TODO(dcarney): remove
-#ifndef V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
- private:
-#endif
-  template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { }
-
-  V8_INLINE T* operator*() const { return this->val_; }
-
  private:
   friend class Isolate;
   friend class Utils;

@@ -736,6 +705,8 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
   template<class F1, class F2> friend class Persistent;
   template<class F> friend class ReturnValue;

+  template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { }
+  V8_INLINE T* operator*() const { return this->val_; }
   template<class S, class M2>
   V8_INLINE void Copy(const Persistent<S, M2>& that);
 };
@@ -804,7 +775,7 @@ class UniquePersistent : public PersistentBase<T> {
   /**
    * Pass allows returning uniques from functions, etc.
    */
-  V8_INLINE UniquePersistent Pass() { return UniquePersistent(RValue(this)); }
+  UniquePersistent Pass() { return UniquePersistent(RValue(this)); }

  private:
   UniquePersistent(UniquePersistent&);
@@ -936,53 +907,6 @@ class V8_EXPORT Data {
 };

-/**
- * Pre-compilation data that can be associated with a script. This
- * data can be calculated for a script in advance of actually
- * compiling it, and can be stored between compilations. When script
- * data is given to the compile method compilation will be faster.
- */
-class V8_EXPORT ScriptData {  // NOLINT
- public:
-  virtual ~ScriptData() { }
-
-  /**
-   * Pre-compiles the specified script (context-independent).
-   *
-   * NOTE: Pre-compilation using this method cannot happen on another thread
-   * without using Lockers.
-   *
-   * \param source Script source code.
-   */
-  static ScriptData* PreCompile(Handle<String> source);
-
-  /**
-   * Load previous pre-compilation data.
-   *
-   * \param data Pointer to data returned by a call to Data() of a previous
-   *   ScriptData. Ownership is not transferred.
-   * \param length Length of data.
-   */
-  static ScriptData* New(const char* data, int length);
-
-  /**
-   * Returns the length of Data().
-   */
-  virtual int Length() = 0;
-
-  /**
-   * Returns a serialized representation of this ScriptData that can later be
-   * passed to New(). NOTE: Serialized data is platform-dependent.
-   */
-  virtual const char* Data() = 0;
-
-  /**
-   * Returns true if the source code could not be parsed.
-   */
-  virtual bool HasError() = 0;
-};

 /**
  * The origin, within a file, of a script.
  */
@@ -1040,12 +964,9 @@ class V8_EXPORT Script {
  public:
   /**
    * A shorthand for ScriptCompiler::Compile().
-   * The ScriptData parameter will be deprecated; use ScriptCompiler::Compile if
-   * you want to pass it.
    */
   static Local<Script> Compile(Handle<String> source,
-                               ScriptOrigin* origin = NULL,
-                               ScriptData* script_data = NULL);
+                               ScriptOrigin* origin = NULL);

   // To be decprecated, use the Compile above.
   static Local<Script> Compile(Handle<String> source,
@@ -1209,12 +1130,6 @@ class V8_EXPORT Message {
    */
   Handle<Value> GetScriptResourceName() const;

-  /**
-   * Returns the resource data for the script from where the function causing
-   * the error originates.
-   */
-  Handle<Value> GetScriptData() const;
-
   /**
    * Exception stack trace. By default stack traces are not captured for
    * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
@@ -2578,7 +2493,7 @@ class PropertyCallbackInfo {
  public:
   V8_INLINE Isolate* GetIsolate() const;
   V8_INLINE Local<Value> Data() const;
-  V8_INLINE Local<Object> This() const;
+  V8_INLINE Local<Value> This() const;
   V8_INLINE Local<Object> Holder() const;
   V8_INLINE ReturnValue<T> GetReturnValue() const;
   // This shouldn't be public, but the arm compiler needs it.
@@ -3941,14 +3856,17 @@ class V8_EXPORT ResourceConstraints {
    *
    * \param physical_memory The total amount of physical memory on the current
    *   device, in bytes.
+   * \param virtual_memory_limit The amount of virtual memory on the current
+   *   device, in bytes, or zero, if there is no limit.
    * \param number_of_processors The number of CPUs available on the current
    *   device.
    */
   void ConfigureDefaults(uint64_t physical_memory,
+                         uint64_t virtual_memory_limit,
                          uint32_t number_of_processors);

-  int max_young_space_size() const { return max_young_space_size_; }
-  void set_max_young_space_size(int value) { max_young_space_size_ = value; }
+  int max_new_space_size() const { return max_new_space_size_; }
+  void set_max_new_space_size(int value) { max_new_space_size_ = value; }
   int max_old_space_size() const { return max_old_space_size_; }
   void set_max_old_space_size(int value) { max_old_space_size_ = value; }
   int max_executable_size() const { return max_executable_size_; }

@@ -3961,13 +3879,18 @@ class V8_EXPORT ResourceConstraints {
   void set_max_available_threads(int value) {
     max_available_threads_ = value;
   }
+  int code_range_size() const { return code_range_size_; }
+  void set_code_range_size(int value) {
+    code_range_size_ = value;
+  }

  private:
-  int max_young_space_size_;
+  int max_new_space_size_;
   int max_old_space_size_;
   int max_executable_size_;
   uint32_t* stack_limit_;
   int max_available_threads_;
+  int code_range_size_;
 };
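Editor's note: ConfigureDefaults gained a middle argument. A sketch of the updated call; the two helper functions supplying platform facts are hypothetical, the code-range unit is an assumption (not stated in this diff), and v8::SetResourceConstraints is assumed to be the usual entry point for applying the struct:

void ConfigureIsolateLimits(v8::Isolate* isolate,
                            uint64_t physical_bytes, int cpu_count) {
  v8::ResourceConstraints constraints;
  // New middle argument: virtual address space limit; 0 means "no limit".
  constraints.ConfigureDefaults(physical_bytes,
                                0 /* virtual_memory_limit */,
                                static_cast<uint32_t>(cpu_count));
  constraints.set_code_range_size(256);  // assumed to be in MB (unverified)
  v8::SetResourceConstraints(isolate, &constraints);
}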
@@ -4147,7 +4070,7 @@ class V8_EXPORT Isolate {
   /**
    * Assert that no Javascript code is invoked.
    */
-  class DisallowJavascriptExecutionScope {
+  class V8_EXPORT DisallowJavascriptExecutionScope {
    public:
     enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE };

@@ -4168,7 +4091,7 @@ class V8_EXPORT Isolate {
   /**
    * Introduce exception to DisallowJavascriptExecutionScope.
    */
-  class AllowJavascriptExecutionScope {
+  class V8_EXPORT AllowJavascriptExecutionScope {
    public:
     explicit AllowJavascriptExecutionScope(Isolate* isolate);
     ~AllowJavascriptExecutionScope();

@@ -4183,6 +4106,24 @@ class V8_EXPORT Isolate {
         const AllowJavascriptExecutionScope&);
   };

+  /**
+   * Do not run microtasks while this scope is active, even if microtasks are
+   * automatically executed otherwise.
+   */
+  class V8_EXPORT SuppressMicrotaskExecutionScope {
+   public:
+    explicit SuppressMicrotaskExecutionScope(Isolate* isolate);
+    ~SuppressMicrotaskExecutionScope();
+
+   private:
+    internal::Isolate* isolate_;
+
+    // Prevent copying of Scope objects.
+    SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&);
+    SuppressMicrotaskExecutionScope& operator=(
+        const SuppressMicrotaskExecutionScope&);
+  };
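Editor's note: the new scope gives embedders a way to call into JS without the microtask queue draining when the call completes. A sketch under the old-style Function::Call signature of this API version; the wrapper is illustrative:

// Enqueued microtasks stay queued until the scope is gone.
void CallWithoutMicrotasks(v8::Isolate* isolate,
                           v8::Local<v8::Function> fn,
                           v8::Local<v8::Object> recv) {
  v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
  fn->Call(recv, 0, NULL);
}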
   /**
    * Types of garbage collections that can be requested via
    * RequestGarbageCollectionForTesting.

@@ -4418,6 +4359,36 @@ class V8_EXPORT Isolate {
    */
   void SetEventLogger(LogEventCallback that);

+  /**
+   * Adds a callback to notify the host application when a script finished
+   * running. If a script re-enters the runtime during executing, the
+   * CallCompletedCallback is only invoked when the outer-most script
+   * execution ends. Executing scripts inside the callback do not trigger
+   * further callbacks.
+   */
+  void AddCallCompletedCallback(CallCompletedCallback callback);
+
+  /**
+   * Removes callback that was installed by AddCallCompletedCallback.
+   */
+  void RemoveCallCompletedCallback(CallCompletedCallback callback);
+
+  /**
+   * Experimental: Runs the Microtask Work Queue until empty
+   */
+  void RunMicrotasks();
+
+  /**
+   * Experimental: Enqueues the callback to the Microtask Work Queue
+   */
+  void EnqueueMicrotask(Handle<Function> microtask);
+
+  /**
+   * Experimental: Controls whether the Microtask Work Queue is automatically
+   * run when the script call depth decrements to zero.
+   */
+  void SetAutorunMicrotasks(bool autorun);
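Editor's note: microtask control moves from static v8::V8 methods (deprecated further down in this diff) onto the isolate. A sketch of explicit pumping, assuming a compiled JS function handle:

// With autorun disabled, the embedder decides exactly when the queue drains.
void PumpMicrotasks(v8::Isolate* isolate, v8::Handle<v8::Function> task) {
  isolate->SetAutorunMicrotasks(false);  // stop draining at call depth 0
  isolate->EnqueueMicrotask(task);
  isolate->RunMicrotasks();              // drain explicitly, right now
}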
  private:
   template<class K, class V, class Traits> friend class PersistentValueMap;
@@ -4779,33 +4750,25 @@ class V8_EXPORT V8 {
    */
   static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);

-  /**
-   * Adds a callback to notify the host application when a script finished
-   * running. If a script re-enters the runtime during executing, the
-   * CallCompletedCallback is only invoked when the outer-most script
-   * execution ends. Executing scripts inside the callback do not trigger
-   * further callbacks.
-   */
-  static void AddCallCompletedCallback(CallCompletedCallback callback);
-
-  /**
-   * Removes callback that was installed by AddCallCompletedCallback.
-   */
-  static void RemoveCallCompletedCallback(CallCompletedCallback callback);
-
   /**
    * Experimental: Runs the Microtask Work Queue until empty
+   *
+   * Deprecated: Use methods on Isolate instead.
    */
   static void RunMicrotasks(Isolate* isolate);

   /**
    * Experimental: Enqueues the callback to the Microtask Work Queue
+   *
+   * Deprecated: Use methods on Isolate instead.
    */
   static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask);

   /**
    * Experimental: Controls whether the Microtask Work Queue is automatically
    * run when the script call depth decrements to zero.
+   *
+   * Deprecated: Use methods on Isolate instead.
    */
   static void SetAutorunMicrotasks(Isolate *source, bool autorun);
@@ -4870,15 +4833,14 @@ class V8_EXPORT V8 {
   /**
    * Forcefully terminate the current thread of JavaScript execution
-   * in the given isolate. If no isolate is provided, the default
-   * isolate is used.
+   * in the given isolate.
    *
    * This method can be used by any thread even if that thread has not
    * acquired the V8 lock with a Locker object.
    *
    * \param isolate The isolate in which to terminate the current JS execution.
    */
-  static void TerminateExecution(Isolate* isolate = NULL);
+  static void TerminateExecution(Isolate* isolate);
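Editor's note: only the mandatory isolate argument is new here. A sketch of the typical caller, a watchdog thread that holds no Locker (the function name is illustrative):

// Safe to call from another thread without acquiring the V8 lock.
void OnWatchdogTimeout(v8::Isolate* isolate) {
  v8::V8::TerminateExecution(isolate);
}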
   /**
    * Is V8 terminating JavaScript execution.

@@ -5246,8 +5208,13 @@ class V8_EXPORT Context {
    */
   void Exit();

-  /** Returns true if the context has experienced an out of memory situation. */
-  bool HasOutOfMemoryException() { return false; }
+  /**
+   * Returns true if the context has experienced an out of memory situation.
+   * Since V8 always treats OOM as fatal error, this can no longer return true.
+   * Therefore this is now deprecated.
+   * */
+  V8_DEPRECATED("This can no longer happen. OOM is a fatal error.",
+                bool HasOutOfMemoryException()) { return false; }

   /** Returns an isolate associated with a current context. */
   v8::Isolate* GetIsolate();
@@ -5559,7 +5526,7 @@ class Internals {
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
-  static const int kContextEmbedderDataIndex = 65;
+  static const int kContextEmbedderDataIndex = 74;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kStringEncodingMask = 0x4;
   static const int kExternalTwoByteRepresentationTag = 0x02;

@@ -5571,7 +5538,7 @@ class Internals {
   static const int kNullValueRootIndex = 7;
   static const int kTrueValueRootIndex = 8;
   static const int kFalseValueRootIndex = 9;
-  static const int kEmptyStringRootIndex = 154;
+  static const int kEmptyStringRootIndex = 162;

   static const int kNodeClassIdOffset = 1 * kApiPointerSize;
   static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;

@@ -6521,8 +6488,8 @@ Local<Value> PropertyCallbackInfo<T>::Data() const {

 template<typename T>
-Local<Object> PropertyCallbackInfo<T>::This() const {
-  return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
+Local<Value> PropertyCallbackInfo<T>::This() const {
+  return Local<Value>(reinterpret_cast<Value*>(&args_[kThisIndex]));
 }
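Editor's note: since This() now yields a Local<Value>, callbacks that assumed an object must check and cast. A sketch of a named-property getter adapted to the new type (the getter itself is hypothetical):

void NameGetter(v8::Local<v8::String> name,
                const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Local<v8::Value> self = info.This();  // was Local<Object> before
  if (self->IsObject())
    info.GetReturnValue().Set(self.As<v8::Object>()->Get(name));
}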

27
deps/v8/include/v8config.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8CONFIG_H_
 #define V8CONFIG_H_

27
deps/v8/include/v8stdint.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 // Load definitions of standard types.

14
deps/v8/samples/lineprocessor.cc

@@ -27,9 +27,7 @@
 #include <v8.h>

-#ifdef ENABLE_DEBUGGER_SUPPORT
 #include <v8-debug.h>
-#endif  // ENABLE_DEBUGGER_SUPPORT

 #include <fcntl.h>
 #include <string.h>

@@ -109,7 +107,6 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
                  bool report_exceptions);

-#ifdef ENABLE_DEBUGGER_SUPPORT
 v8::Persistent<v8::Context> debug_message_context;

 void DispatchDebugMessages() {
@@ -132,7 +129,6 @@ void DispatchDebugMessages() {
   v8::Debug::ProcessDebugMessages();
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT

 int RunMain(int argc, char* argv[]) {
@@ -144,11 +140,9 @@ int RunMain(int argc, char* argv[]) {
   v8::Handle<v8::Value> script_name;
   int script_param_counter = 0;

-#ifdef ENABLE_DEBUGGER_SUPPORT
   int port_number = -1;
   bool wait_for_connection = false;
   bool support_callback = false;
-#endif  // ENABLE_DEBUGGER_SUPPORT

   MainCycleType cycle_type = CycleInCpp;

@@ -162,7 +156,6 @@ int RunMain(int argc, char* argv[]) {
       cycle_type = CycleInCpp;
     } else if (strcmp(str, "--main-cycle-in-js") == 0) {
       cycle_type = CycleInJs;
-#ifdef ENABLE_DEBUGGER_SUPPORT
     } else if (strcmp(str, "--callback") == 0) {
       support_callback = true;
     } else if (strcmp(str, "--wait-for-connection") == 0) {
@@ -170,7 +163,6 @@ int RunMain(int argc, char* argv[]) {
     } else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
       port_number = atoi(argv[i + 1]);  // NOLINT
       i++;
-#endif  // ENABLE_DEBUGGER_SUPPORT
     } else if (strncmp(str, "--", 2) == 0) {
       printf("Warning: unknown flag %s.\nTry --help for options\n", str);
     } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@@ -218,7 +210,6 @@ int RunMain(int argc, char* argv[]) {
   // Enter the newly created execution environment.
   v8::Context::Scope context_scope(context);

-#ifdef ENABLE_DEBUGGER_SUPPORT
   debug_message_context.Reset(isolate, context);

   v8::Locker locker(isolate);
@@ -230,7 +221,6 @@ int RunMain(int argc, char* argv[]) {
   if (port_number != -1) {
     v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
   }
-#endif  // ENABLE_DEBUGGER_SUPPORT

   bool report_exceptions = true;

@@ -275,9 +265,7 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
                  v8::Local<v8::Context> context,
                  bool report_exceptions) {
   v8::Isolate* isolate = context->GetIsolate();
-#ifdef ENABLE_DEBUGGER_SUPPORT
   v8::Locker lock(isolate);
-#endif  // ENABLE_DEBUGGER_SUPPORT

   v8::Handle<v8::String> fun_name =
       v8::String::NewFromUtf8(isolate, "ProcessLine");
@@ -435,9 +423,7 @@ v8::Handle<v8::String> ReadLine() {
   char* res;
   {
-#ifdef ENABLE_DEBUGGER_SUPPORT
     v8::Unlocker unlocker(v8::Isolate::GetCurrent());
-#endif  // ENABLE_DEBUGGER_SUPPORT
     res = fgets(buffer, kBufferSize, stdin);
   }
   v8::Isolate* isolate = v8::Isolate::GetCurrent();

1227
deps/v8/src/accessors.cc

File diff suppressed because it is too large

147
deps/v8/src/accessors.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// [24 lines of the old BSD license header, identical to the block removed from v8-debug.h above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ACCESSORS_H_
 #define V8_ACCESSORS_H_
@ -36,42 +13,52 @@ namespace internal {
// The list of accessor descriptors. This is a second-order macro // The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names. // taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_DESCRIPTOR_LIST(V) \ #define ACCESSOR_INFO_LIST(V) \
V(FunctionPrototype) \ V(ArrayLength) \
V(FunctionLength) \
V(FunctionName) \
V(FunctionArguments) \ V(FunctionArguments) \
V(FunctionCaller) \ V(FunctionCaller) \
V(ArrayLength) \ V(FunctionName) \
V(StringLength) \ V(FunctionLength) \
V(ScriptSource) \ V(FunctionPrototype) \
V(ScriptName) \
V(ScriptId) \
V(ScriptLineOffset) \
V(ScriptColumnOffset) \ V(ScriptColumnOffset) \
V(ScriptType) \
V(ScriptCompilationType) \ V(ScriptCompilationType) \
V(ScriptLineEnds) \
V(ScriptContextData) \ V(ScriptContextData) \
V(ScriptEvalFromScript) \ V(ScriptEvalFromScript) \
V(ScriptEvalFromScriptPosition) \ V(ScriptEvalFromScriptPosition) \
V(ScriptEvalFromFunctionName) V(ScriptEvalFromFunctionName) \
V(ScriptId) \
V(ScriptLineEnds) \
V(ScriptLineOffset) \
V(ScriptName) \
V(ScriptSource) \
V(ScriptType) \
V(StringLength)
// Accessors contains all predefined proxy accessors. // Accessors contains all predefined proxy accessors.
class Accessors : public AllStatic { class Accessors : public AllStatic {
public: public:
// Accessor descriptors. // Accessor descriptors.
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ #define ACCESSOR_INFO_DECLARATION(name) \
static const AccessorDescriptor name; static void name##Getter( \
ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) v8::Local<v8::String> name, \
#undef ACCESSOR_DESCRIPTOR_DECLARATION const v8::PropertyCallbackInfo<v8::Value>& info); \
static void name##Setter( \
v8::Local<v8::String> name, \
v8::Local<v8::Value> value, \
const v8::PropertyCallbackInfo<void>& info); \
static Handle<AccessorInfo> name##Info( \
Isolate* isolate, \
PropertyAttributes attributes);
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
enum DescriptorId { enum DescriptorId {
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \ #define ACCESSOR_INFO_DECLARATION(name) \
k##name, k##name##Getter, \
ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION) k##name##Setter,
#undef ACCESSOR_DESCRIPTOR_DECLARATION ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
descriptorCount descriptorCount
}; };
@ -92,72 +79,16 @@ class Accessors : public AllStatic {
Handle<String> name, Handle<String> name,
int* object_offset); int* object_offset);
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate,
Handle<String> name,
AccessorGetterCallback getter,
AccessorSetterCallback setter,
PropertyAttributes attributes);
private: private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionSetPrototype(Isolate* isolate,
JSObject* object,
Object*,
void*);
static MaybeObject* FunctionGetPrototype(Isolate* isolate,
Object* object,
void*);
static MaybeObject* FunctionGetLength(Isolate* isolate,
Object* object,
void*);
static MaybeObject* FunctionGetName(Isolate* isolate, Object* object, void*);
static MaybeObject* FunctionGetArguments(Isolate* isolate,
Object* object,
void*);
static MaybeObject* FunctionGetCaller(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ArraySetLength(Isolate* isolate,
JSObject* object,
Object*,
void*);
static MaybeObject* ArrayGetLength(Isolate* isolate, Object* object, void*);
static MaybeObject* StringGetLength(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetName(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetId(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetSource(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetLineOffset(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetColumnOffset(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetType(Isolate* isolate, Object* object, void*);
static MaybeObject* ScriptGetCompilationType(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetLineEnds(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetContextData(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetEvalFromScript(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetEvalFromScriptPosition(Isolate* isolate,
Object* object,
void*);
static MaybeObject* ScriptGetEvalFromFunctionName(Isolate* isolate,
Object* object,
void*);
// Helper functions. // Helper functions.
static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value); static Handle<Object> FlattenNumber(Isolate* isolate, Handle<Object> value);
static MaybeObject* IllegalSetter(Isolate* isolate,
JSObject*,
Object*,
void*);
static Object* IllegalGetAccessor(Isolate* isolate, Object* object, void*);
static MaybeObject* ReadOnlySetAccessor(Isolate* isolate,
JSObject*,
Object* value,
void*);
}; };
} } // namespace v8::internal } } // namespace v8::internal
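The ACCESSOR_INFO_LIST change above is the classic second-order ("X macro") pattern: the list is written once and expanded several times. A self-contained sketch of how both the getter/setter declarations and the DescriptorId enum fall out of one list (simplified types, not V8's real ones):

    #include <cstdio>

    // A reduced model of V8's second-order macro: defined once, expanded twice.
    #define ACCESSOR_INFO_LIST(V) \
      V(ArrayLength)              \
      V(StringLength)

    // First expansion: one getter/setter pair per list entry.
    #define ACCESSOR_INFO_DECLARATION(name) \
      void name##Getter() { std::printf("%s getter\n", #name); } \
      void name##Setter() { std::printf("%s setter\n", #name); }
    ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
    #undef ACCESSOR_INFO_DECLARATION

    // Second expansion: two enum values per entry, as in the new DescriptorId.
    enum DescriptorId {
    #define ACCESSOR_INFO_DECLARATION(name) k##name##Getter, k##name##Setter,
      ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
    #undef ACCESSOR_INFO_DECLARATION
      descriptorCount
    };

    int main() {
      ArrayLengthGetter();
      StringLengthSetter();
      // Prints 4: two list entries x (getter + setter).
      std::printf("descriptorCount = %d\n", static_cast<int>(descriptorCount));
    }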

29
deps/v8/src/allocation-site-scopes.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "allocation-site-scopes.h"
@@ -62,7 +39,7 @@ Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
 void AllocationSiteCreationContext::ExitScope(
     Handle<AllocationSite> scope_site,
     Handle<JSObject> object) {
-  if (!object.is_null() && !object->IsFailure()) {
+  if (!object.is_null()) {
     bool top_level = !scope_site.is_null() &&
                      top().is_identical_to(scope_site);

27
deps/v8/src/allocation-site-scopes.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ALLOCATION_SITE_SCOPES_H_
 #define V8_ALLOCATION_SITE_SCOPES_H_

38
deps/v8/src/allocation-tracker.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "v8.h"
@@ -211,11 +188,6 @@ void AddressToTraceMap::RemoveRange(Address start, Address end) {
 }

-static bool AddressesMatch(void* key1, void* key2) {
-  return key1 == key2;
-}
-
 void AllocationTracker::DeleteFunctionInfo(FunctionInfo** info) {
   delete *info;
 }
@@ -225,7 +197,7 @@ AllocationTracker::AllocationTracker(
     HeapObjectsMap* ids, StringsStorage* names)
     : ids_(ids),
       names_(names),
-      id_to_function_info_index_(AddressesMatch),
+      id_to_function_info_index_(HashMap::PointersMatch),
       info_index_for_other_state_(0) {
   FunctionInfo* info = new FunctionInfo();
   info->name = "(root)";
@@ -354,8 +326,8 @@ AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
 void AllocationTracker::UnresolvedLocation::Resolve() {
   if (script_.is_null()) return;
   HandleScope scope(script_->GetIsolate());
-  info_->line = GetScriptLineNumber(script_, start_position_);
-  info_->column = GetScriptColumnNumber(script_, start_position_);
+  info_->line = Script::GetLineNumber(script_, start_position_);
+  info_->column = Script::GetColumnNumber(script_, start_position_);
 }
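The tracker previously carried its own key-equality callback and now reuses the shared HashMap::PointersMatch. A sketch of the callback shape involved (the typedef here is illustrative, not V8's exact declaration):

    // Hash maps keyed on raw addresses compare keys by identity. The removed
    // static helper and the shared HashMap::PointersMatch both reduce to this:
    typedef bool (*MatchFun)(void* key1, void* key2);

    static bool PointersMatch(void* key1, void* key2) {
      return key1 == key2;  // identity, not structural equality
    }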

27
deps/v8/src/allocation-tracker.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ALLOCATION_TRACKER_H_
 #define V8_ALLOCATION_TRACKER_H_

27
deps/v8/src/allocation.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "allocation.h"

27
deps/v8/src/allocation.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ALLOCATION_H_
 #define V8_ALLOCATION_H_

1066
deps/v8/src/api.cc

File diff suppressed because it is too large

35
deps/v8/src/api.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_API_H_
 #define V8_API_H_
@@ -186,9 +163,9 @@ class RegisteredExtension {
   V(Script, JSFunction)                        \
   V(UnboundScript, SharedFunctionInfo)         \
   V(Function, JSFunction)                      \
-  V(Message, JSObject)                         \
+  V(Message, JSMessageObject)                  \
   V(Context, Context)                          \
-  V(External, Foreign)                         \
+  V(External, Object)                          \
   V(StackTrace, JSArray)                       \
   V(StackFrame, JSObject)                      \
   V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
@@ -393,8 +370,8 @@ MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor)
     const v8::From* that, bool allow_empty_handle) {                        \
   EXTRA_CHECK(allow_empty_handle || that != NULL);                         \
   EXTRA_CHECK(that == NULL ||                                              \
-              !(*reinterpret_cast<v8::internal::To**>(                     \
-                  const_cast<v8::From*>(that)))->IsFailure());             \
+              (*reinterpret_cast<v8::internal::Object**>(                  \
+                  const_cast<v8::From*>(that)))->Is##To());                \
   return v8::internal::Handle<v8::internal::To>(                           \
       reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that)));  \
 }
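The tightened EXTRA_CHECK relies on token pasting to call a per-type predicate (IsJSFunction(), IsJSArray(), ...) instead of the old, weaker !IsFailure() test. A reduced model of that dispatch, with dummy classes standing in for V8's Object hierarchy:

    #include <cassert>

    // Dummy object with per-type predicates, standing in for v8::internal::Object.
    struct Object {
      bool IsJSFunction() const { return tag == 1; }
      bool IsJSArray() const { return tag == 2; }
      int tag;
    };

    // Is##To pasting turns a macro argument into a typed predicate call,
    // as in the rewritten EXTRA_CHECK(... ->Is##To()).
    #define CHECK_IS(obj, To) assert((obj).Is##To())

    int main() {
      Object fn{1};
      CHECK_IS(fn, JSFunction);  // predicate chosen at macro-expansion time
    }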

61
deps/v8/src/apinatives.js

@@ -1,29 +1,6 @@
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 // This file contains infrastructure used by the API.  See
 // v8natives.js for an explanation of these files are processed and
@@ -71,31 +48,29 @@ function InstantiateFunction(data, name) {
       (serialNumber in cache) && (cache[serialNumber] != kUninitialized);
   if (!isFunctionCached) {
     try {
-      var fun = %CreateApiFunction(data);
-      if (name) %FunctionSetName(fun, name);
-      var flags = %GetTemplateField(data, kApiFlagOffset);
-      var doNotCache = flags & (1 << kDoNotCacheBit);
-      if (!doNotCache) cache[serialNumber] = fun;
-      if (flags & (1 << kRemovePrototypeBit)) {
-        %FunctionRemovePrototype(fun);
-      } else {
-        var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
-        // Note: Do not directly use an object template as a condition, our
-        // internal ToBoolean doesn't handle that!
-        fun.prototype = typeof prototype === 'undefined' ?
-            {} : Instantiate(prototype);
-        if (flags & (1 << kReadOnlyPrototypeBit)) {
-          %FunctionSetReadOnlyPrototype(fun);
-        }
-        %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
-        var parent = %GetTemplateField(data, kApiParentTemplateOffset);
-        // Note: Do not directly use a function template as a condition, our
-        // internal ToBoolean doesn't handle that!
-        if (!(typeof parent === 'undefined')) {
-          var parent_fun = Instantiate(parent);
-          %SetPrototype(fun.prototype, parent_fun.prototype);
-        }
-      }
+      var flags = %GetTemplateField(data, kApiFlagOffset);
+      var has_proto = !(flags & (1 << kRemovePrototypeBit));
+      var prototype;
+      if (has_proto) {
+        var template = %GetTemplateField(data, kApiPrototypeTemplateOffset);
+        prototype = typeof template === 'undefined'
+            ? {} : Instantiate(template);
+        var parent = %GetTemplateField(data, kApiParentTemplateOffset);
+        // Note: Do not directly use a function template as a condition, our
+        // internal ToBoolean doesn't handle that!
+        if (typeof parent !== 'undefined') {
+          var parent_fun = Instantiate(parent);
+          %SetPrototype(prototype, parent_fun.prototype);
+        }
+      }
+      var fun = %CreateApiFunction(data, prototype);
+      if (name) %FunctionSetName(fun, name);
+      var doNotCache = flags & (1 << kDoNotCacheBit);
+      if (!doNotCache) cache[serialNumber] = fun;
+      if (has_proto && flags & (1 << kReadOnlyPrototypeBit)) {
+        %FunctionSetReadOnlyPrototype(fun);
+      }
       ConfigureTemplateInstance(fun, data);
       if (doNotCache) return fun;
     } catch (e) {

27
deps/v8/src/arguments.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "v8.h"
 #include "arguments.h"

38
deps/v8/src/arguments.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ARGUMENTS_H_
 #define V8_ARGUMENTS_H_
@@ -299,10 +276,10 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
 #endif

-#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(int args_length, Object** args_object, Isolate* isolate)
+#define DECLARE_RUNTIME_FUNCTION(Name) \
+Object* Name(int args_length, Object** args_object, Isolate* isolate)

-#define RUNTIME_FUNCTION(Type, Name) \
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
 Type Name(int args_length, Object** args_object, Isolate* isolate) { \
   CLOBBER_DOUBLE_REGISTERS(); \
@@ -311,6 +288,11 @@ Type Name(int args_length, Object** args_object, Isolate* isolate) { \
 } \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate)

+#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
+#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
+    RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
+
 #define RUNTIME_ARGUMENTS(isolate, args) \
   args.length(), args.arguments(), isolate
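The macro refactor layers RUNTIME_FUNCTION and RUNTIME_FUNCTION_RETURN_PAIR over one generic RUNTIME_FUNCTION_RETURNS_TYPE, so the common Object*-returning case no longer spells out its return type. A compilable sketch of the same layering, with placeholder types standing in for V8's:

    #include <cstdio>

    // Placeholder stand-ins for V8's types.
    struct Object {};
    struct ObjectPair { Object* x; Object* y; };
    struct Isolate {};
    struct Arguments {};

    // Generic macro: declares the implementation, then defines the public
    // entry point that forwards to it -- the shape used in arguments.h.
    #define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name)                       \
      static Type __RT_impl_##Name(Arguments args, Isolate* isolate);       \
      Type Name(int args_length, Object** args_object, Isolate* isolate) {  \
        (void)args_length; (void)args_object;                               \
        return __RT_impl_##Name(Arguments(), isolate);                      \
      }                                                                     \
      static Type __RT_impl_##Name(Arguments args, Isolate* isolate)

    // The common case is now spelled without the return type:
    #define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)

    RUNTIME_FUNCTION(Runtime_Example) {
      (void)args; (void)isolate;
      std::printf("runtime function body\n");
      return nullptr;
    }

    int main() {
      Isolate isolate;
      Runtime_Example(0, nullptr, &isolate);  // calls through the wrapper
    }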

14
deps/v8/src/arm/assembler-arm-inl.h

@@ -222,7 +222,7 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
 }

-static const int kNoCodeAgeSequenceLength = 3;
+static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;

 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
@@ -234,15 +234,15 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
-      Memory::Address_at(pc_ + Assembler::kInstrSize *
-                         (kNoCodeAgeSequenceLength - 1)));
+      Memory::Address_at(pc_ +
+                         (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
 }

 void RelocInfo::set_code_age_stub(Code* stub) {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Memory::Address_at(pc_ + Assembler::kInstrSize *
-                     (kNoCodeAgeSequenceLength - 1)) =
+  Memory::Address_at(pc_ +
+                     (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
       stub->instruction_start();
 }
@@ -323,14 +323,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
     visitor->VisitExternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
              isolate->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
-#endif
   } else if (RelocInfo::IsRuntimeEntry(mode)) {
     visitor->VisitRuntimeEntry(this);
   }
@@ -350,14 +348,12 @@ void RelocInfo::Visit(Heap* heap) {
     StaticVisitor::VisitExternalReference(this);
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
     StaticVisitor::VisitDebugTarget(heap, this);
-#endif
   } else if (RelocInfo::IsRuntimeEntry(mode)) {
     StaticVisitor::VisitRuntimeEntry(this);
   }
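The constant changes meaning from an instruction count to a byte length, so the address arithmetic is unchanged: with kInstrSize = 4, the old form pc + 4 * (3 - 1) and the new form pc + (12 - 4) both yield pc + 8. A static check of that equivalence:

    // Equivalence of the old (count-based) and new (byte-based) offsets.
    const int kInstrSize = 4;                // ARM instruction width in bytes
    const int kOldLength = 3;                // old constant: in instructions
    const int kNewLength = 3 * kInstrSize;   // new constant: in bytes
    static_assert(kInstrSize * (kOldLength - 1) == kNewLength - kInstrSize,
                  "code-age stub offset is unchanged by the unit switch");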

80
deps/v8/src/arm/assembler-arm.cc

@@ -100,10 +100,11 @@ const char* DwVfpRegister::AllocationIndexToString(int index) {
 }

-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool serializer_enabled) {
   uint64_t standard_features = static_cast<unsigned>(
       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
-  ASSERT(supported_ == 0 || supported_ == standard_features);
+  ASSERT(supported_ == 0 ||
+         (supported_ & standard_features) == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -113,10 +114,8 @@
   // snapshot.
   supported_ |= standard_features;

-  if (Serializer::enabled()) {
+  if (serializer_enabled) {
     // No probing for features if we might serialize (generate snapshot).
-    printf("   ");
-    PrintFeatures();
     return;
   }
@@ -1077,15 +1076,11 @@
 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
 // space.  There is no guarantee that the relocated location can be similarly
 // encoded.
-bool Operand::must_output_reloc_info(const Assembler* assembler) const {
+bool Operand::must_output_reloc_info(Isolate* isolate,
+                                     const Assembler* assembler) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-    if (!Serializer::enabled()) {
-      Serializer::TooLateToEnableNow();
-    }
-#endif  // def DEBUG
     if (assembler != NULL && assembler->predictable_code_size()) return true;
-    return Serializer::enabled();
+    return Serializer::enabled(isolate);
   } else if (RelocInfo::IsNone(rmode_)) {
     return false;
   }
@@ -1093,7 +1088,8 @@
 }

-static bool use_mov_immediate_load(const Operand& x,
+static bool use_mov_immediate_load(Isolate* isolate,
+                                   const Operand& x,
                                    const Assembler* assembler) {
   if (assembler != NULL && !assembler->can_use_constant_pool()) {
     // If there is no constant pool available, we must use an mov immediate.
@@ -1104,7 +1100,7 @@
       (assembler == NULL || !assembler->predictable_code_size())) {
     // Prefer movw / movt to constant pool if it is more efficient on the CPU.
     return true;
-  } else if (x.must_output_reloc_info(assembler)) {
+  } else if (x.must_output_reloc_info(isolate, assembler)) {
     // Prefer constant pool if data is likely to be patched.
     return false;
   } else {
@@ -1114,17 +1110,18 @@
 }

-bool Operand::is_single_instruction(const Assembler* assembler,
+bool Operand::is_single_instruction(Isolate* isolate,
+                                    const Assembler* assembler,
                                     Instr instr) const {
   if (rm_.is_valid()) return true;
   uint32_t dummy1, dummy2;
-  if (must_output_reloc_info(assembler) ||
+  if (must_output_reloc_info(isolate, assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
     // constant pool is required. For a mov instruction not setting the
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      return !use_mov_immediate_load(*this, assembler);
+      return !use_mov_immediate_load(isolate, *this, assembler);
     } else {
       // If this is not a mov or mvn instruction there will always an additional
       // instructions - either mov or ldr. The mov might actually be two
@@ -1144,15 +1141,16 @@ void Assembler::move_32_bit_immediate(Register rd,
                                       const Operand& x,
                                       Condition cond) {
   RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
-  if (x.must_output_reloc_info(this)) {
+  if (x.must_output_reloc_info(isolate(), this)) {
     RecordRelocInfo(rinfo);
   }

-  if (use_mov_immediate_load(x, this)) {
+  if (use_mov_immediate_load(isolate(), x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
     // TODO(rmcilroy): add ARMv6 support for immediate loads.
     ASSERT(CpuFeatures::IsSupported(ARMv7));
-    if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+    if (!FLAG_enable_ool_constant_pool &&
+        x.must_output_reloc_info(isolate(), this)) {
       // Make sure the movw/movt doesn't get separated.
       BlockConstPoolFor(2);
     }
@@ -1180,7 +1178,7 @@ void Assembler::addrmod1(Instr instr,
   // Immediate.
   uint32_t rotate_imm;
   uint32_t immed_8;
-  if (x.must_output_reloc_info(this) ||
+  if (x.must_output_reloc_info(isolate(), this) ||
       !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, so load
     // it first to register ip and change the original instruction to use ip.
@@ -1862,7 +1860,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
   // Immediate.
   uint32_t rotate_imm;
   uint32_t immed_8;
-  if (src.must_output_reloc_info(this) ||
+  if (src.must_output_reloc_info(isolate(), this) ||
       !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
     // Immediate operand cannot be encoded, load it first to register ip.
     move_32_bit_immediate(ip, src);
@@ -2827,8 +2825,9 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
   ASSERT(CpuFeatures::IsSupported(VFP3));
   int vd, d;
   dst.split_code(&vd, &d);
-  int i = ((32 - fraction_bits) >> 4) & 1;
-  int imm4 = (32 - fraction_bits) & 0xf;
+  int imm5 = 32 - fraction_bits;
+  int i = imm5 & 1;
+  int imm4 = (imm5 >> 1) & 0xf;
   emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
        vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
 }
@@ -3161,9 +3160,7 @@ void Assembler::RecordComment(const char* msg) {
 void Assembler::RecordConstPool(int size) {
   // We only need this for debugger support, to correctly compute offsets in the
   // code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
   RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
 }
@@ -3266,12 +3263,7 @@ void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
   if (!RelocInfo::IsNone(rinfo.rmode())) {
     // Don't record external references unless the heap will be serialized.
     if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-      if (!Serializer::enabled()) {
-        Serializer::TooLateToEnableNow();
-      }
-#endif
-      if (!Serializer::enabled() && !emit_debug_code()) {
+      if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
         return;
       }
     }
@@ -3502,7 +3494,8 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
     // data
     bool found = false;
-    if (!Serializer::enabled() && (rinfo.rmode() >= RelocInfo::CELL)) {
+    if (!Serializer::enabled(isolate()) &&
+        (rinfo.rmode() >= RelocInfo::CELL)) {
       for (int j = 0; j < i; j++) {
         RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j];
@@ -3547,14 +3540,15 @@
 }

-MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
-  ASSERT(FLAG_enable_ool_constant_pool);
-  return constant_pool_builder_.Allocate(heap);
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+  if (!FLAG_enable_ool_constant_pool) {
+    return isolate->factory()->empty_constant_pool_array();
+  }
+  return constant_pool_builder_.New(isolate);
 }

 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
-  ASSERT(FLAG_enable_ool_constant_pool);
   constant_pool_builder_.Populate(this, constant_pool);
 }
@@ -3605,7 +3599,7 @@ void ConstantPoolBuilder::AddEntry(Assembler* assm,
   // Try to merge entries which won't be patched.
   int merged_index = -1;
   if (RelocInfo::IsNone(rmode) ||
-      (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+      (!Serializer::enabled(assm->isolate()) && (rmode >= RelocInfo::CELL))) {
     size_t i;
     std::vector<RelocInfo>::const_iterator it;
     for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
@@ -3654,12 +3648,14 @@ void ConstantPoolBuilder::Relocate(int pc_delta) {
 }

-MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
+Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
   if (IsEmpty()) {
-    return heap->empty_constant_pool_array();
+    return isolate->factory()->empty_constant_pool_array();
   } else {
-    return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
-                                           count_of_heap_ptr_, count_of_32bit_);
+    return isolate->factory()->NewConstantPoolArray(count_of_64bit_,
+                                                    count_of_code_ptr_,
+                                                    count_of_heap_ptr_,
+                                                    count_of_32bit_);
   }
 }
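The vcvt_f64_s32 change is a genuine encoding fix, not a cleanup. The VFP fixed-point immediate is a 5-bit field imm5 = 32 - fraction_bits, stored with its low bit in i (bit 5 of the instruction) and its high four bits in imm4; the old code instead put bit 4 of imm5 into i and the low nibble into imm4. Worked numbers, with fraction_bits = 16 so imm5 = 16 = 0b10000:

    #include <cassert>

    int main() {
      const int fraction_bits = 16;
      const int imm5 = 32 - fraction_bits;     // 0b10000

      // Old (wrong) split: i took bit 4, imm4 took bits 3..0.
      const int old_i = (imm5 >> 4) & 1;       // 1
      const int old_imm4 = imm5 & 0xf;         // 0

      // New (correct) split: i is the least-significant bit of imm5.
      const int new_i = imm5 & 1;              // 0
      const int new_imm4 = (imm5 >> 1) & 0xf;  // 0b1000 = 8

      assert(old_i != new_i && old_imm4 != new_imm4);   // encodings diverge
      assert(((new_imm4 << 1) | new_i) == imm5);        // round-trips to imm5
    }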

27
deps/v8/src/arm/assembler-arm.h

@@ -56,7 +56,7 @@ class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);

   // Display target use when compiling.
   static void PrintTarget();
@@ -70,15 +70,11 @@
     return Check(f, supported_);
   }

-  static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
-    ASSERT(initialized_);
-    return Check(f, found_by_runtime_probing_only_);
-  }
-
-  static bool IsSafeForSnapshot(CpuFeature f) {
+  static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
     return Check(f, cross_compile_) ||
            (IsSupported(f) &&
-            (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+            !(Serializer::enabled(isolate) &&
+              Check(f, found_by_runtime_probing_only_)));
   }

   static unsigned cache_line_size() { return cache_line_size_; }
@@ -93,6 +89,8 @@
            (cross_compile_ & mask) == mask;
   }

+  static bool SupportsCrankshaft() { return CpuFeatures::IsSupported(VFP3); }
+
  private:
   static bool Check(CpuFeature f, unsigned set) {
     return (set & flag2set(f)) != 0;
@@ -590,8 +588,11 @@ class Operand BASE_EMBEDDED {
   // the instruction this operand is used for is a MOV or MVN instruction the
   // actual instruction to use is required for this calculation. For other
   // instructions instr is ignored.
-  bool is_single_instruction(const Assembler* assembler, Instr instr = 0) const;
-  bool must_output_reloc_info(const Assembler* assembler) const;
+  bool is_single_instruction(Isolate* isolate,
+                             const Assembler* assembler,
+                             Instr instr = 0) const;
+  bool must_output_reloc_info(Isolate* isolate,
+                              const Assembler* assembler) const;

   inline int32_t immediate() const {
     ASSERT(!rm_.is_valid());
@@ -714,7 +715,7 @@ class ConstantPoolBuilder BASE_EMBEDDED {
   void AddEntry(Assembler* assm, const RelocInfo& rinfo);
   void Relocate(int pc_delta);
   bool IsEmpty();
-  MaybeObject* Allocate(Heap* heap);
+  Handle<ConstantPoolArray> New(Isolate* isolate);
   void Populate(Assembler* assm, ConstantPoolArray* constant_pool);

   inline int count_of_64bit() const { return count_of_64bit_; }
@@ -728,6 +729,8 @@
   bool IsCodePtrEntry(RelocInfo::Mode rmode);
   bool IsHeapPtrEntry(RelocInfo::Mode rmode);

+  // TODO(rmcilroy): This should ideally be a ZoneList, however that would mean
+  // RelocInfo would need to subclass ZoneObject which it currently doesn't.
   std::vector<RelocInfo> entries_;
   std::vector<int> merged_indexes_;
   int count_of_64bit_;
@@ -1498,7 +1501,7 @@ class Assembler : public AssemblerBase {
   void CheckConstPool(bool force_emit, bool require_jump);

   // Allocate a constant pool of the correct size for the generated code.
-  MaybeObject* AllocateConstantPool(Heap* heap);
+  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);

   // Generate the constant pool for the generated code.
   void PopulateConstantPool(ConstantPoolArray* constant_pool);

65
deps/v8/src/arm/builtins-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// (old BSD license text; the full 25-line block is shown in the accessors.h hunk above)
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "v8.h"
@@ -376,14 +353,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     Label rt_call, allocated;
     if (FLAG_inline_new) {
       Label undo_allocation;
-#ifdef ENABLE_DEBUGGER_SUPPORT
       ExternalReference debug_step_in_fp =
           ExternalReference::debug_step_in_fp_address(isolate);
       __ mov(r2, Operand(debug_step_in_fp));
       __ ldr(r2, MemOperand(r2));
       __ tst(r2, r2);
       __ b(ne, &rt_call);
-#endif

       // Load the initial map and verify that it is in fact a map.
       // r1: constructor function
@@ -807,7 +782,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     if (is_construct) {
       // No type feedback cell is available
       __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+      CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
       __ CallStub(&stub);
     } else {
       ParameterCount actual(r0);
@@ -923,7 +898,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));

   // Jump to point after the code-age stub.
-  __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize));
+  __ add(r0, r0, Operand(kNoCodeAgeSequenceLength));
   __ mov(pc, r0);
 }
@@ -1284,7 +1259,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     // Out of stack space.
     __ ldr(r1, MemOperand(fp, kFunctionOffset));
     __ Push(r1, r0);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
     // End of stack check.

     // Push current limit and index.
@@ -1407,6 +1382,26 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 }

+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r0 : actual number of arguments
+  //  -- r1 : function (passed through to callee)
+  //  -- r2 : expected number of arguments
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+  // Make r5 the space we have left. The stack might already be overflowed
+  // here which will cause r5 to become negative.
+  __ sub(r5, sp, r5);
+  // Check if the arguments will overflow the stack.
+  __ cmp(r5, Operand(r2, LSL, kPointerSizeLog2));
+  __ b(le, stack_overflow);  // Signed comparison.
+}
+
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r0);
   __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1446,6 +1441,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   //  -- r2 : expected number of arguments
   // -----------------------------------

+  Label stack_overflow;
+  ArgumentAdaptorStackCheck(masm, &stack_overflow);
   Label invoke, dont_adapt_arguments;

   Label enough, too_few;
@@ -1545,6 +1542,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
   __ Jump(r3);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    EnterArgumentsAdaptorFrame(masm);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    __ bkpt(0);
+  }
 }

929
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

52
deps/v8/src/arm/code-stubs-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_CODE_STUBS_ARM_H_
 #define V8_ARM_CODE_STUBS_ARM_H_
@@ -39,8 +16,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
-  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) {}
+  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+      : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
 
   void Generate(MacroAssembler* masm);
@@ -91,7 +68,7 @@ class StringHelper : public AllStatic {
 class SubStringStub: public PlatformCodeStub {
  public:
-  SubStringStub() {}
+  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
  private:
   Major MajorKey() { return SubString; }
@@ -104,7 +81,7 @@ class SubStringStub: public PlatformCodeStub {
 class StringCompareStub: public PlatformCodeStub {
  public:
-  StringCompareStub() { }
+  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
 
   // Compares two flat ASCII strings and returns result in r0.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
@@ -144,10 +121,12 @@ class StringCompareStub: public PlatformCodeStub {
 // so you don't have to set up the frame.
 class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
  public:
-  WriteInt32ToHeapNumberStub(Register the_int,
+  WriteInt32ToHeapNumberStub(Isolate* isolate,
+                             Register the_int,
                              Register the_heap_number,
                              Register scratch)
-      : the_int_(the_int),
+      : PlatformCodeStub(isolate),
+        the_int_(the_int),
         the_heap_number_(the_heap_number),
         scratch_(scratch) { }
@@ -177,12 +156,14 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 class RecordWriteStub: public PlatformCodeStub {
  public:
-  RecordWriteStub(Register object,
+  RecordWriteStub(Isolate* isolate,
+                  Register object,
                   Register value,
                   Register address,
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
-      : object_(object),
+      : PlatformCodeStub(isolate),
+        object_(object),
         value_(value),
         address_(address),
         remembered_set_action_(remembered_set_action),
@@ -363,7 +344,7 @@ class RecordWriteStub: public PlatformCodeStub {
 // moved by GC
 class DirectCEntryStub: public PlatformCodeStub {
  public:
-  DirectCEntryStub() {}
+  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
   void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, Register target);
@@ -379,7 +360,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
-  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+      : PlatformCodeStub(isolate), mode_(mode) { }
 
   void Generate(MacroAssembler* masm);
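
Every stub constructor in this header gains a leading Isolate* that is forwarded to PlatformCodeStub; with the isolate captured at construction, stub.GetCode() can drop its isolate argument (visible in the full-codegen-arm.cc hunks further down). A minimal sketch of the pattern, with hypothetical class names rather than V8's real ones:

    class Isolate;

    // The base class stores the isolate once...
    class PlatformCodeStubSketch {
     public:
      explicit PlatformCodeStubSketch(Isolate* isolate) : isolate_(isolate) {}
      Isolate* isolate() const { return isolate_; }
     private:
      Isolate* isolate_;
    };

    // ...and each concrete stub threads it through, as in the hunks above.
    class DirectCEntryStubSketch : public PlatformCodeStubSketch {
     public:
      explicit DirectCEntryStubSketch(Isolate* isolate)
          : PlatformCodeStubSketch(isolate) {}
    };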

101
deps/v8/src/arm/codegen-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -102,13 +79,11 @@ UnaryMathFunction CreateExpFunction() {
 #if defined(V8_HOST_ARCH_ARM)
 OS::MemCopyUint8Function CreateMemCopyUint8Function(
     OS::MemCopyUint8Function stub) {
 #if defined(USE_SIMULATOR)
   return stub;
 #else
-  if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
-    return stub;
-  }
+  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return stub;
@@ -260,13 +235,11 @@ OS::MemCopyUint8Function CreateMemCopyUint8Function(
 // Convert 8 to 16. The number of character to copy must be at least 8.
 OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
     OS::MemCopyUint16Uint8Function stub) {
 #if defined(USE_SIMULATOR)
   return stub;
 #else
-  if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
-    return stub;
-  }
+  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
   size_t actual_size;
   byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return stub;
@@ -849,47 +822,46 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
 static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
 #endif
 
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
-  // The sequence of instructions that is patched out for aging code is the
-  // following boilerplate stack-building prologue that is found in FUNCTIONS
-  static bool initialized = false;
-  static uint32_t sequence[kNoCodeAgeSequenceLength];
-  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
-  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
-  if (!initialized) {
-    // Since patcher is a large object, allocate it dynamically when needed,
-    // to avoid overloading the stack in stress conditions.
-    SmartPointer<CodePatcher>
-        patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
-    PredictableCodeSizeScope scope(patcher->masm(), *length);
-    patcher->masm()->PushFixedFrame(r1);
-    patcher->masm()->nop(ip.code());
-    patcher->masm()->add(
-        fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-    initialized = true;
-  }
-  return byte_sequence;
+CodeAgingHelper::CodeAgingHelper() {
+  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // Since patcher is a large object, allocate it dynamically when needed,
+  // to avoid overloading the stack in stress conditions.
+  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before ARM simulator ICache is setup.
+  SmartPointer<CodePatcher> patcher(
+      new CodePatcher(young_sequence_.start(),
+                      young_sequence_.length() / Assembler::kInstrSize,
+                      CodePatcher::DONT_FLUSH));
+  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+  patcher->masm()->PushFixedFrame(r1);
+  patcher->masm()->nop(ip.code());
+  patcher->masm()->add(
+      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
 }
 
 
-bool Code::IsYoungSequence(byte* sequence) {
-  uint32_t young_length;
-  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
-  bool result = !memcmp(sequence, young_sequence, young_length);
-  ASSERT(result ||
-         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+  bool result = isolate->code_aging_helper()->IsYoung(sequence);
+  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
   return result;
 }
 
 
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                                MarkingParity* parity) {
-  if (IsYoungSequence(sequence)) {
+  if (IsYoungSequence(isolate, sequence)) {
     *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
     Address target_address = Memory::Address_at(
-        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
     Code* stub = GetCodeFromTargetAddress(target_address);
     GetCodeAgeAndParity(stub, age, parity);
   }
@@ -900,10 +872,9 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
                                 byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
-  uint32_t young_length;
-  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
-    CopyBytes(sequence, young_sequence, young_length);
+    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     CPU::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
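
The function-local static initialization of the young code-age sequence is replaced by a CodeAgingHelper owned by the isolate, which bakes the prologue once; IsYoungSequence then defers to it. Note the related hunks elsewhere in this commit: kNoCodeAgeSequenceLength is now measured in bytes rather than instructions, which is why the `* Assembler::kInstrSize` scaling disappears here and in builtins-arm.cc and deoptimizer-arm.cc. A hedged sketch of the comparison the helper presumably performs (IsYoung's body is not shown in this diff; the memcmp is our assumption carried over from the old code):

    #include <cstring>

    class CodeAgingHelperSketch {
     public:
      // Compare a code prologue against the cached young sequence.
      bool IsYoung(const unsigned char* sequence) const {
        return std::memcmp(sequence, young_, length_) == 0;
      }
     private:
      // The three prologue instructions emitted by the CodePatcher above,
      // at 4 bytes each; kNoCodeAgeSequenceLength (in bytes) on real ARM.
      unsigned char young_[12];
      unsigned int length_ = 12;
    };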

27
deps/v8/src/arm/codegen-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_CODEGEN_ARM_H_
 #define V8_ARM_CODEGEN_ARM_H_

27
deps/v8/src/arm/constants-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"

27
deps/v8/src/arm/constants-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_CONSTANTS_ARM_H_
 #define V8_ARM_CONSTANTS_ARM_H_

37
deps/v8/src/arm/cpu-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2006-2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 // CPU specific code for arm independent of OS goes here.
 
 #ifdef __arm__
@@ -46,16 +23,6 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return CpuFeatures::IsSupported(VFP3);
-}
-
 void CPU::FlushICache(void* start, size_t size) {
   // Nothing to do flushing no instructions.
   if (size == 0) {

68
deps/v8/src/arm/debug-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -35,7 +12,6 @@
 namespace v8 {
 namespace internal {
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
   return Debug::IsDebugBreakAtReturn(rinfo());
 }
@@ -56,7 +32,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
   patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
   patcher.masm()->blx(v8::internal::ip);
   patcher.Emit(
-      debug_info_->GetIsolate()->debug()->debug_break_return()->entry());
+      debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry());
   patcher.masm()->bkpt(0);
 }
@@ -97,7 +73,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
   patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
   patcher.masm()->blx(v8::internal::ip);
   patcher.Emit(
-      debug_info_->GetIsolate()->debug()->debug_break_slot()->entry());
+      debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry());
 }
@@ -146,7 +122,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   __ mov(r0, Operand::Zero());  // no arguments
   __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
-  CEntryStub ceb(1);
+  CEntryStub ceb(masm->isolate(), 1);
   __ CallStub(&ceb);
 
   // Restore the register values from the expression stack.
@@ -179,6 +155,16 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
 }
 
+
+void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallICStub
+  // ----------- S t a t e -------------
+  //  -- r1 : function
+  //  -- r3 : slot in feedback array (smi)
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r1.bit() | r3.bit(), 0);
+}
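
Generate_DebugBreakCallHelper takes a bitmask naming the registers that hold live stub state and must survive the debugger call; the new CallICStub break site preserves r1 (function) and r3 (feedback slot). The mask arithmetic, spelled out as a sketch (Register::bit() is the V8 equivalent; the constant name here is ours):

    #include <cstdint>

    constexpr uint32_t Bit(int reg_code) { return 1u << reg_code; }

    // r1 has register code 1 and r3 has code 3, so r1.bit() | r3.bit()
    // yields the mask 0b1010.
    constexpr uint32_t kCallICStubLiveRegs = Bit(1) | Bit(3);
    static_assert(kCallICStubLiveRegs == 0xA, "mask covering r1 and r3");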
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
   // ----------- S t a t e -------------
@@ -235,15 +221,6 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
 }
 
-void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
-  // Calling convention for IC call (from ic-arm.cc)
-  // ----------- S t a t e -------------
-  //  -- r2 : name
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
-}
-
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // In places other than IC call sites it is expected that r0 is TOS which
   // is an object - this is not generally the case so this should be used with
@@ -261,17 +238,6 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
 }
 
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-arm.cc).
-  // ----------- S t a t e -------------
-  //  -- r1 : function
-  //  -- r2 : feedback array
-  //  -- r3 : slot in feedback array
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
-}
-
 void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
   // Calling convention for CallConstructStub (from code-stubs-arm.cc)
   // ----------- S t a t e -------------
@@ -329,10 +295,6 @@ const bool Debug::kFrameDropperSupported = false;
 
 #undef __
 
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM

32
deps/v8/src/arm/deoptimizer-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -54,7 +31,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   // Fail hard and early if we enter this code object again.
   byte* pointer = code->FindCodeAgeSequence();
   if (pointer != NULL) {
-    pointer += kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+    pointer += kNoCodeAgeSequenceLength;
   } else {
     pointer = code->instruction_start();
   }
@@ -87,7 +64,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
     // We need calls to have a predictable size in the unoptimized code, but
    // this is optimized code, so we don't have to have a predictable size.
     int call_size_in_bytes =
-        MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+        MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
+                                                       deopt_entry,
                                                        RelocInfo::NONE32);
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);

29
deps/v8/src/arm/disasm-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 // A Disassembler object is used to disassemble a block of code instruction by
 // instruction. The default implementation of the NameConverter object can be
@@ -1272,7 +1249,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
   } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
              (instr->Bit(8) == 1)) {
     // vcvt.f64.s32 Dd, Dd, #<fbits>
-    int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+    int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
     Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
     out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                     ", #%d", fraction_bits);

27
deps/v8/src/arm/frames-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"

27
deps/v8/src/arm/frames-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_FRAMES_ARM_H_
 #define V8_ARM_FRAMES_ARM_H_

279
deps/v8/src/arm/full-codegen-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -118,14 +95,20 @@ static void EmitStackCheck(MacroAssembler* masm_,
   Isolate* isolate = masm_->isolate();
   Label ok;
   ASSERT(scratch.is(sp) == (pointers == 0));
+  Heap::RootListIndex index;
   if (pointers != 0) {
     __ sub(scratch, sp, Operand(pointers * kPointerSize));
+    index = Heap::kRealStackLimitRootIndex;
+  } else {
+    index = Heap::kStackLimitRootIndex;
   }
-  __ LoadRoot(stack_limit_scratch, Heap::kStackLimitRootIndex);
+  __ LoadRoot(stack_limit_scratch, index);
   __ cmp(scratch, Operand(stack_limit_scratch));
   __ b(hs, &ok);
-  PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
-  __ Call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+  Handle<Code> stack_check = isolate->builtins()->StackCheck();
+  PredictableCodeSizeScope predictable(masm_,
+      masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+  __ Call(stack_check, RelocInfo::CODE_TARGET);
   __ bind(&ok);
 }
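
When the prologue reserves room for `pointers` stack slots, the check now probes sp minus that reservation against the real (hard) stack limit, while the plain sp check keeps the interrupt-aware limit so stack guards and interrupt requests still fire. The selection, restated as a sketch (enum values from the diff; the surrounding scaffolding is ours):

    enum RootListIndexSketch {
      kStackLimitRootIndex,      // interrupt-aware limit, can be lowered
      kRealStackLimitRootIndex   // hard limit, no interrupt slack
    };

    RootListIndexSketch PickStackLimit(int pointers) {
      // A non-zero reservation probes below sp, so compare against the hard
      // limit; otherwise keep the limit that also catches interrupt requests.
      return pointers != 0 ? kRealStackLimitRootIndex : kStackLimitRootIndex;
    }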
@@ -150,8 +133,6 @@ void FullCodeGenerator::Generate() {
   handler_table_ =
       isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
 
-  InitializeFeedbackVector();
-
   profiling_counter_ = isolate()->factory()->NewCell(
       Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
   SetFunctionPosition(function());
@@ -236,7 +217,7 @@ void FullCodeGenerator::Generate() {
       __ Push(info->scope()->GetScopeInfo());
       __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub(heap_slots);
+      FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
     } else {
       __ push(r1);
@@ -297,7 +278,7 @@ void FullCodeGenerator::Generate() {
     } else {
       type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
     }
-    ArgumentsAccessStub stub(type);
+    ArgumentsAccessStub stub(isolate(), type);
     __ CallStub(&stub);
 
     SetVar(arguments, r0, r1, r2);
@@ -1187,12 +1168,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   Label non_proxy;
   __ bind(&fixed_array);
 
-  Handle<Object> feedback = Handle<Object>(
-      Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
-      isolate());
-  StoreFeedbackVectorSlot(slot, feedback);
   __ Move(r1, FeedbackVector());
-  __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker)));
+  __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
   __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
 
   __ mov(r1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
@@ -1351,7 +1328,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->strict_mode(), info->is_generator());
+    FastNewClosureStub stub(isolate(),
+                            info->strict_mode(),
+                            info->is_generator());
     __ mov(r2, Operand(info));
     __ CallStub(&stub);
   } else {
@@ -1671,13 +1650,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
       : ObjectLiteral::kNoFlags;
   __ mov(r0, Operand(Smi::FromInt(flags)));
   int properties_count = constant_properties->length() / 2;
-  if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
-      flags != ObjectLiteral::kFastElements ||
+  if (expr->may_store_doubles() || expr->depth() > 1 ||
+      Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
       properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ Push(r3, r2, r1, r0);
     __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
+    FastCloneShallowObjectStub stub(isolate(), properties_count);
     __ CallStub(&stub);
   }
@@ -1816,13 +1795,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   if (has_fast_elements && constant_elements_values->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
+        isolate(),
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
         allocation_site_mode,
         length);
     __ CallStub(&stub);
     __ IncrementCounter(
         isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
-  } else if (expr->depth() > 1 || Serializer::enabled() ||
+  } else if (expr->depth() > 1 || Serializer::enabled(isolate()) ||
              length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ mov(r0, Operand(Smi::FromInt(flags)));
     __ Push(r3, r2, r1, r0);
@@ -1837,7 +1817,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
       mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
     }
 
-    FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+    FastCloneShallowArrayStub stub(isolate(), mode, allocation_site_mode,
+                                   length);
     __ CallStub(&stub);
   }
@@ -1869,7 +1850,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
                      EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
     } else {
       __ mov(r3, Operand(Smi::FromInt(i)));
-      StoreArrayLiteralElementStub stub;
+      StoreArrayLiteralElementStub stub(isolate());
       __ CallStub(&stub);
     }
@@ -1886,7 +1867,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidLeftHandSide());
+  ASSERT(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
@@ -2114,7 +2095,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
       CallIC(ic, TypeFeedbackId::None());
       __ mov(r1, r0);
       __ str(r1, MemOperand(sp, 2 * kPointerSize));
-      CallFunctionStub stub(1, CALL_AS_METHOD);
+      CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
       __ CallStub(&stub);
 
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2272,7 +2253,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
   Label gc_required;
   Label allocated;
 
-  Handle<Map> map(isolate()->native_context()->generator_result_map());
+  Handle<Map> map(isolate()->native_context()->iterator_result_map());
 
   __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT);
   __ jmp(&allocated);
@@ -2343,8 +2324,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  BinaryOpICStub stub(op, mode);
-  CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+  BinaryOpICStub stub(isolate(), op, mode);
+  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
@@ -2419,16 +2400,16 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  BinaryOpICStub stub(op, mode);
+  BinaryOpICStub stub(isolate(), op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
+  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidLeftHandSide());
+  ASSERT(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2628,14 +2609,15 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
 
 // Code common for calls using the IC.
-void FullCodeGenerator::EmitCallWithIC(Call* expr) {
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
 
-  CallFunctionFlags flags;
+  CallIC::CallType call_type = callee->IsVariableProxy()
+      ? CallIC::FUNCTION
+      : CallIC::METHOD;
   // Get the target function.
-  if (callee->IsVariableProxy()) {
+  if (call_type == CallIC::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2643,7 +2625,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
     // Push undefined as receiver. This is patched in the method prologue if it
     // is a sloppy mode method.
     __ Push(isolate()->factory()->undefined_value());
-    flags = NO_CALL_FUNCTION_FLAGS;
   } else {
     // Load the function from the receiver.
     ASSERT(callee->IsProperty());
@@ -2654,40 +2635,19 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
     __ ldr(ip, MemOperand(sp, 0));
     __ push(ip);
     __ str(r0, MemOperand(sp, kPointerSize));
-    flags = CALL_AS_METHOD;
   }
 
-  // Load the arguments.
-  { PreservePositionScope scope(masm()->positions_recorder());
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-  }
-
-  // Record source position for debugger.
-  SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, flags);
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
-
-  RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  context()->DropAndPlug(1, r0);
+  EmitCall(expr, call_type);
 }
 
 
 // Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
-                                            Expression* key) {
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+                                                Expression* key) {
   // Load the key.
   VisitForAccumulatorValue(key);
 
   Expression* callee = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
 
   // Load the function from the receiver.
   ASSERT(callee->IsProperty());
@@ -2700,28 +2660,12 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
   __ push(ip);
   __ str(r0, MemOperand(sp, kPointerSize));
 
-  { PreservePositionScope scope(masm()->positions_recorder());
-    for (int i = 0; i < arg_count; i++) {
-      VisitForStackValue(args->at(i));
-    }
-  }
-
-  // Record source position for debugger.
-  SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, CALL_AS_METHOD);
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
-
-  RecordJSReturnSite(expr);
-
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  context()->DropAndPlug(1, r0);
+  EmitCall(expr, CallIC::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCallWithStub(Call* expr) {
-  // Code common for calls using the call stub.
+void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+  // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
   { PreservePositionScope scope(masm()->positions_recorder());
@@ -2729,19 +2673,17 @@ void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
       VisitForStackValue(args->at(i));
     }
   }
-  // Record source position for debugger.
-  SetSourcePosition(expr->position());
 
-  Handle<Object> uninitialized =
-      TypeFeedbackInfo::UninitializedSentinel(isolate());
-  StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
-  __ Move(r2, FeedbackVector());
+  // Record source position of the IC call.
+  SetSourcePosition(expr->position());
+  Handle<Code> ic = CallIC::initialize_stub(
+      isolate(), arg_count, call_type);
   __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
-
-  // Record call targets in unoptimized code.
-  CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub);
+  // Don't assign a type feedback id to the IC, since type feedback is provided
+  // by the vector above.
+  CallIC(ic);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2816,7 +2758,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger. // Record source position for debugger.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub); __ CallStub(&stub);
RecordJSReturnSite(expr); RecordJSReturnSite(expr);
@ -2824,7 +2766,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0); context()->DropAndPlug(1, r0);
} else if (call_type == Call::GLOBAL_CALL) { } else if (call_type == Call::GLOBAL_CALL) {
EmitCallWithIC(expr); EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) { } else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable). // Call to a lookup slot (dynamically introduced variable).
@ -2864,16 +2806,16 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// The receiver is either the global receiver or an object found // The receiver is either the global receiver or an object found
// by LoadContextSlot. // by LoadContextSlot.
EmitCallWithStub(expr); EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) { } else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty(); Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder()); { PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
} }
if (property->key()->IsPropertyName()) { if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr); EmitCallWithLoadIC(expr);
} else { } else {
EmitKeyedCallWithIC(expr, property->key()); EmitKeyedCallWithLoadIC(expr, property->key());
} }
} else { } else {
ASSERT(call_type == Call::OTHER_CALL); ASSERT(call_type == Call::OTHER_CALL);
@ -2884,7 +2826,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex); __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ push(r1); __ push(r1);
// Emit function call. // Emit function call.
EmitCallWithStub(expr); EmitCall(expr);
} }
#ifdef DEBUG #ifdef DEBUG
@@ -2921,12 +2863,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  Handle<Object> uninitialized =
-      TypeFeedbackInfo::UninitializedSentinel(isolate());
-  StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
   if (FLAG_pretenuring_call_new) {
-    StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
-                            isolate()->factory()->NewAllocationSite());
+    EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
     ASSERT(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
@@ -2934,8 +2872,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   __ Move(r2, FeedbackVector());
   __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
 
-  CallConstructStub stub(RECORD_CALL_TARGET);
-  __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
+  CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
   PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
   context()->Plug(r0);
 }
@@ -3305,7 +3243,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   VisitForAccumulatorValue(args->at(0));
   __ mov(r1, r0);
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3391,31 +3329,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
 }
 
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 3);
-  if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
-    VisitForStackValue(args->at(1));
-    VisitForStackValue(args->at(2));
-    __ CallRuntime(Runtime::kHiddenLog, 2);
-  }
-  // Finally, we're expected to leave a value on the top of the stack.
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  context()->Plug(r0);
-}
-
 void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
-  SubStringStub stub;
+  SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
@@ -3428,7 +3344,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
 void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
   // Load the arguments on the stack and call the stub.
-  RegExpExecStub stub;
+  RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
@@ -3578,7 +3494,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub(MathPowStub::ON_STACK);
+  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3618,7 +3534,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   // Load the argument into r0 and call the stub.
   VisitForAccumulatorValue(args->at(0));
 
-  NumberToStringStub stub;
+  NumberToStringStub stub(isolate());
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3741,7 +3657,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   VisitForAccumulatorValue(args->at(1));
 
   __ pop(r1);
-  StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
+  StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
   __ CallStub(&stub);
   context()->Plug(r0);
 }
@@ -3753,32 +3669,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
-  StringCompareStub stub;
+  StringCompareStub stub(isolate());
   __ CallStub(&stub);
   context()->Plug(r0);
 }
 
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
-  // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallRuntime(Runtime::kMath_log, 1);
-  context()->Plug(r0);
-}
-
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
-  // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallRuntime(Runtime::kMath_sqrt, 1);
-  context()->Plug(r0);
-}
-
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() >= 2);
@@ -3812,7 +3708,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
 void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
-  RegExpConstructResultStub stub;
+  RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
@@ -4178,7 +4074,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
     // Record source position of the IC call.
     SetSourcePosition(expr->position());
-    CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
     __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
@@ -4310,7 +4206,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidLeftHandSide());
+  ASSERT(expr->expression()->IsValidReferenceExpression());
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4396,7 +4292,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     __ jmp(&stub_call);
     __ bind(&slow);
   }
-  ToNumberStub convert_stub;
+  ToNumberStub convert_stub(isolate());
   __ CallStub(&convert_stub);
 
   // Save result for postfix expressions.
@@ -4427,8 +4323,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       // Record position before stub call.
       SetSourcePosition(expr->position());
-      BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
-      CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
+      BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
+      CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
       patch_site.EmitPatchInfo();
       __ bind(&done);
@@ -4539,13 +4435,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
   }
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
-  if (check->Equals(isolate()->heap()->number_string())) {
+  Factory* factory = isolate()->factory();
+  if (String::Equals(check, factory->number_string())) {
     __ JumpIfSmi(r0, if_true);
     __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
     __ cmp(r0, ip);
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->string_string())) {
+  } else if (String::Equals(check, factory->string_string())) {
     __ JumpIfSmi(r0, if_false);
     // Check for undetectable objects => false.
     __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@@ -4553,20 +4450,20 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
     __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
     __ tst(r1, Operand(1 << Map::kIsUndetectable));
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->symbol_string())) {
+  } else if (String::Equals(check, factory->symbol_string())) {
     __ JumpIfSmi(r0, if_false);
     __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->boolean_string())) {
+  } else if (String::Equals(check, factory->boolean_string())) {
     __ CompareRoot(r0, Heap::kTrueValueRootIndex);
     __ b(eq, if_true);
     __ CompareRoot(r0, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
   } else if (FLAG_harmony_typeof &&
-             check->Equals(isolate()->heap()->null_string())) {
+             String::Equals(check, factory->null_string())) {
     __ CompareRoot(r0, Heap::kNullValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->undefined_string())) {
+  } else if (String::Equals(check, factory->undefined_string())) {
     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
     __ b(eq, if_true);
     __ JumpIfSmi(r0, if_false);
@@ -4576,14 +4473,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
     __ tst(r1, Operand(1 << Map::kIsUndetectable));
     Split(ne, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->function_string())) {
+  } else if (String::Equals(check, factory->function_string())) {
     __ JumpIfSmi(r0, if_false);
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
     __ b(eq, if_true);
     __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
     Split(eq, if_true, if_false, fall_through);
-  } else if (check->Equals(isolate()->heap()->object_string())) {
+  } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(r0, if_false);
     if (!FLAG_harmony_typeof) {
       __ CompareRoot(r0, Heap::kNullValueRootIndex);
@@ -4636,7 +4533,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
     case Token::INSTANCEOF: {
       VisitForStackValue(expr->right());
-      InstanceofStub stub(InstanceofStub::kNoFlags);
+      InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
       __ CallStub(&stub);
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
       // The stub returns 0 for true.
@ -4780,7 +4677,8 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference has_pending_message = ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate()); ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message)); __ mov(ip, Operand(has_pending_message));
__ ldr(r1, MemOperand(ip)); STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ ldrb(r1, MemOperand(ip));
__ SmiTag(r1); __ SmiTag(r1);
__ push(r1); __ push(r1);
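Why this hunk (and the matching one in ExitFinallyBlock) switches from word-wide ldr/str to byte-wide ldrb/strb: the pending-message flag is a C++ bool, which is one byte, so a four-byte access would also touch whatever sits next to it. A standalone illustration, not V8 code:

// Reading a one-byte flag with a four-byte load picks up the neighbors.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  static_assert(sizeof(bool) == 1, "the ldrb/strb change relies on this");
  unsigned char cell[4] = {1, 0xAB, 0xCD, 0xEF};  // flag plus unrelated bytes
  bool flag;
  std::memcpy(&flag, cell, 1);   // byte-wide access: sees only the flag
  std::uint32_t word;
  std::memcpy(&word, cell, 4);   // word-wide access: sees the neighbors too
  std::printf("flag=%d word=0x%08x\n", flag, word);
  return 0;
}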
@ -4806,7 +4704,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
ExternalReference has_pending_message = ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate()); ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message)); __ mov(ip, Operand(has_pending_message));
__ str(r1, MemOperand(ip)); STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ strb(r1, MemOperand(ip));
__ pop(r1); __ pop(r1);
ExternalReference pending_message_obj = ExternalReference pending_message_obj =

27
deps/v8/src/arm/ic-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"

249
deps/v8/src/arm/lithium-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -623,6 +600,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
       !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
   }
 
   return instr;
@@ -871,7 +850,8 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   // the it was just a plain use), so it is free to move the split child into
   // the same register that is used for the use-at-start.
   // See https://code.google.com/p/chromium/issues/detail?id=201590
-  if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
     int fixed = 0;
     int used_at_start = 0;
     for (UseIterator it(instr); !it.Done(); it.Advance()) {
@@ -931,18 +911,20 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
   if (goto_instr != NULL) return goto_instr;
 
   HValue* value = instr->value();
-  LBranch* result = new(zone()) LBranch(UseRegister(value));
-  // Tagged values that are not known smis or booleans require a
-  // deoptimization environment. If the instruction is generic no
-  // environment is needed since all cases are handled.
-  Representation rep = value->representation();
+  Representation r = value->representation();
   HType type = value->type();
   ToBooleanStub::Types expected = instr->expected_input_types();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
-      !expected.IsGeneric()) {
-    return AssignEnvironment(result);
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
   }
-  return result;
+  return branch;
 }
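The policy encoded in the new DoBranch can be read off the hunk directly: a branch keeps a deoptimization environment only for tagged operands of non-obvious type whose recorded ToBoolean feedback is still incomplete. A compilable paraphrase with simplified stand-in types (these structs are not V8's classes):

#include <cstdio>

struct TypeInfo {
  bool is_tagged, is_boolean, is_smi, is_js_array, is_heap_number, is_string;
};

struct ToBooleanFeedback {
  bool is_generic, contains_smi, needs_map;
};

// Mirrors the branch policy above: only hard tagged cases whose feedback is
// still incomplete keep a deopt environment.
bool NeedsDeoptEnvironment(const TypeInfo& t, const ToBooleanFeedback& fb) {
  bool easy_case = !t.is_tagged || t.is_boolean || t.is_smi ||
                   t.is_js_array || t.is_heap_number || t.is_string;
  return !easy_case && ((!fb.contains_smi && fb.needs_map) || !fb.is_generic);
}

int main() {
  TypeInfo tagged_unknown = {true, false, false, false, false, false};
  ToBooleanFeedback generic = {true, true, false};   // covers every case
  ToBooleanFeedback partial = {false, false, true};  // must re-check the map
  std::printf("generic -> %d, partial -> %d\n",
              NeedsDeoptEnvironment(tagged_unknown, generic),
              NeedsDeoptEnvironment(tagged_unknown, partial));
  return 0;
}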
@@ -1138,8 +1120,11 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
       ? NULL
       : UseFixed(instr->context(), cp);
   LOperand* input = UseRegister(instr->value());
-  LMathAbs* result = new(zone()) LMathAbs(context, input);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
 }
@@ -1284,15 +1269,25 @@ LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
 }
 
-LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
   ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
-  LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
-  return AssignEnvironment(DefineAsRegister(div));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       (!CpuFeatures::IsSupported(SUDIV) ||
+        !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
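The condition list above enumerates exactly the int32 divisions that can fail: division by zero, a -0 result, the kMinInt / -1 overflow, and inexact quotients when every use does not truncate. The overflow case is worth a worked example, since the true quotient simply does not fit (a sketch; in C++ this division is undefined behavior, which is why generated code guards it explicitly):

#include <cstdint>
#include <cstdio>

int main() {
  std::int32_t min_int = INT32_MIN;  // -2147483648
  std::int32_t divisor = -1;
  // Guarded division, mirroring the check the generated code must emit:
  if (min_int == INT32_MIN && divisor == -1) {
    std::puts("would overflow: +2147483648 does not fit in int32");
  } else {
    std::printf("%d\n", min_int / divisor);
  }
  return 0;
}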
@@ -1346,13 +1341,25 @@ LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
 }
 
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+  LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp);
+  return AssignEnvironment(DefineAsRegister(div));
+}
+
 LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
   if (instr->RightIsPowerOf2()) {
     return DoFlooringDivByPowerOf2I(instr);
   } else if (instr->right()->IsConstant()) {
     return DoFlooringDivByConstI(instr);
   } else {
-    return DoDivI(instr);
+    return DoFlooringDivI(instr);
   }
 }
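Why Math.floor(a / b) now gets its own LFlooringDivI instead of reusing DoDivI: machine integer division truncates toward zero, while flooring division rounds toward negative infinity, and the two differ whenever the signs differ and the division is inexact. A small demonstration of the difference and of the sign-based adjustment (the same adjustment the old mls/eor/ASR#31 sequence removed from DoDivI performed):

#include <cstdio>

int FlooringDiv(int a, int b) {
  int q = a / b;                         // truncating quotient
  int r = a - q * b;                     // remainder (what mls computes)
  if (r != 0 && ((r ^ b) < 0)) q -= 1;   // signs differ: round down one more
  return q;
}

int main() {
  std::printf("trunc(-7/2)=%d floor(-7/2)=%d\n", -7 / 2, FlooringDiv(-7, 2));
  // prints trunc(-7/2)=-3 floor(-7/2)=-4
  return 0;
}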
@@ -1647,6 +1654,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
 LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
     HCompareNumericAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(r));
@@ -1801,9 +1810,16 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
-  LOperand* length = UseRegister(instr->length());
-  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
@@ -1837,20 +1853,21 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  HValue* val = instr->value();
   if (from.IsSmi()) {
     if (to.IsTagged()) {
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       return DefineSameAsFirst(new(zone()) LDummyUse(value));
     }
     from = Representation::Tagged();
   }
   if (from.IsTagged()) {
     if (to.IsDouble()) {
-      LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
-      return AssignEnvironment(DefineAsRegister(res));
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
     } else if (to.IsSmi()) {
-      HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->type().IsSmi()) {
         return DefineSameAsFirst(new(zone()) LDummyUse(value));
@@ -1858,66 +1875,59 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = NULL;
-      LInstruction* res = NULL;
-      HValue* val = instr->value();
       if (val->type().IsSmi() || val->representation().IsSmi()) {
-        value = UseRegisterAtStart(val);
-        res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
       } else {
-        value = UseRegister(val);
+        LOperand* value = UseRegister(val);
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = FixedTemp(d11);
-        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
-                                                       temp1,
-                                                       temp2));
-        res = AssignEnvironment(res);
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
       }
-      return res;
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
       info()->MarkAsDeferredCalling();
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
-      // Make sure that the temp and result_temp registers are
-      // different.
       LUnallocated* result_temp = TempRegister();
       LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
-      Define(result, result_temp);
-      return AssignPointerMap(result);
+      return AssignPointerMap(Define(result, result_temp));
     } else if (to.IsSmi()) {
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = UseRegister(instr->value());
-      LDoubleToI* res = new(zone()) LDoubleToI(value);
-      return AssignEnvironment(DefineAsRegister(res));
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
     }
   } else if (from.IsInteger32()) {
     info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
-      HValue* val = instr->value();
-      LOperand* value = UseRegisterAtStart(val);
       if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
         return DefineAsRegister(new(zone()) LSmiTag(value));
       } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
        LOperand* temp1 = TempRegister();
        LOperand* temp2 = TempRegister();
        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+        return AssignPointerMap(DefineAsRegister(result));
       } else {
+        LOperand* value = UseRegisterAtStart(val);
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = TempRegister();
         LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+        return AssignPointerMap(DefineAsRegister(result));
       }
     } else if (to.IsSmi()) {
-      HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
       if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1926,12 +1936,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
       return result;
     } else {
       ASSERT(to.IsDouble());
-      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
-        return DefineAsRegister(
-            new(zone()) LUint32ToDouble(UseRegister(instr->value())));
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
-        return DefineAsRegister(
-            new(zone()) LInteger32ToDouble(Use(instr->value())));
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
       }
     }
   }
@@ -1942,7 +1950,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
 LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->IsHeapObject()) result = AssignEnvironment(result);
+  return result;
 }
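Background on what LCheckNonSmi actually tests, for readers new to V8's value representation. The sketch below assumes V8's 32-bit smi scheme (tag bit 0 cleared for small integers, set for heap pointers); the constants and helpers are illustrative, not V8's:

#include <cstdint>
#include <cstdio>

const std::uintptr_t kSmiTagMask = 1;

// A tagged word is a smi iff its low bit is 0 (assumed 32-bit scheme).
bool IsSmi(std::uintptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

std::uintptr_t TagSmi(std::int32_t value) {
  return static_cast<std::uintptr_t>(value) << 1;  // shift in the 0 tag bit
}

int main() {
  std::printf("IsSmi(TagSmi(42)) = %d\n", IsSmi(TagSmi(42)));  // 1
  std::printf("IsSmi(0x1003)     = %d\n", IsSmi(0x1003));      // 0: heap ptr
  return 0;
}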
@@ -1966,15 +1976,12 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  LOperand* value = NULL;
-  if (!instr->CanOmitMapChecks()) {
-    value = UseRegisterAtStart(instr->value());
-    if (instr->has_migration_target()) info()->MarkAsDeferredCalling();
-  }
-  LCheckMaps* result = new(zone()) LCheckMaps(value);
-  if (!instr->CanOmitMapChecks()) {
-    AssignEnvironment(result);
-    if (instr->has_migration_target()) return AssignPointerMap(result);
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
   }
   return result;
 }
@@ -2072,7 +2079,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
       DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
@@ -2087,7 +2097,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
     value = UseRegister(instr->value());
   }
   LInstruction* result = new(zone()) LStoreContextSlot(context, value);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
@@ -2122,7 +2135,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
   ASSERT(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyed* result = NULL;
+  LInstruction* result = NULL;
 
   if (!instr->is_typed_elements()) {
     LOperand* obj = NULL;
@@ -2132,24 +2145,28 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
       ASSERT(instr->representation().IsSmiOrTagged());
       obj = UseRegisterAtStart(instr->elements());
     }
-    result = new(zone()) LLoadKeyed(obj, key);
+    result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
     ASSERT(
         (instr->representation().IsInteger32() &&
-         !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+         !IsDoubleOrFloatElementsKind(elements_kind)) ||
         (instr->representation().IsDouble() &&
-         IsDoubleOrFloatElementsKind(instr->elements_kind())));
+         IsDoubleOrFloatElementsKind(elements_kind)));
     LOperand* backing_store = UseRegister(instr->elements());
-    result = new(zone()) LLoadKeyed(backing_store, key);
+    result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
   }
 
-  DefineAsRegister(result);
-  // An unsigned int array load might overflow and cause a deopt, make sure it
-  // has an environment.
-  bool can_deoptimize = instr->RequiresHoleCheck() ||
-      elements_kind == EXTERNAL_UINT32_ELEMENTS ||
-      elements_kind == UINT32_ELEMENTS;
-  return can_deoptimize ? AssignEnvironment(result) : result;
+  if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+      // see LCodeGen::DoLoadKeyedExternalArray
+      ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+        elements_kind == UINT32_ELEMENTS) &&
+       !instr->CheckFlag(HInstruction::kUint32)) :
+      // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+      // LCodeGen::DoLoadKeyedFixedArray
+      instr->RequiresHoleCheck()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
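The uint32 condition above exists because a UINT32_ELEMENTS load produces values that may not fit the optimizer's int32 representation: anything above INT32_MAX has no int32 encoding, so unless every use is known to treat the value as uint32, the load must keep a deopt environment. A one-case illustration:

#include <cstdint>
#include <cstdio>

int main() {
  std::uint32_t element = 3000000000u;  // a legal uint32 array element
  bool fits_int32 = element <= static_cast<std::uint32_t>(INT32_MAX);
  if (!fits_int32) {
    std::puts("would deopt: 3000000000 exceeds INT32_MAX");
  }
  return 0;
}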
@@ -2225,17 +2242,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  LOperand* object = UseRegister(instr->object());
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
     return result;
   } else {
+    LOperand* object = UseFixed(instr->object(), r0);
     LOperand* context = UseFixed(instr->context(), cp);
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, context, NULL);
-    return AssignPointerMap(result);
+    return MarkAsCall(result, instr);
   }
 }
@@ -2279,11 +2297,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   // We need a temporary register for write barrier of the map field.
   LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
 
-  LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
-  if (instr->field_representation().IsHeapObject()) {
-    if (!instr->value()->type().IsHeapObject()) {
-      return AssignEnvironment(result);
-    }
+  LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
+  if (!instr->access().IsExternalMemory() &&
+      instr->field_representation().IsHeapObject() &&
+      !instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
   }
   return result;
 }
@@ -2315,7 +2333,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* context = UseAny(instr->context());
   LStringCharCodeAt* result =
       new(zone()) LStringCharCodeAt(context, string, index);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  return AssignPointerMap(DefineAsRegister(result));
 }
@@ -2371,7 +2389,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   } else {
     ASSERT(info()->IsStub());
     CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+        info()->code_stub()->GetInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor->GetParameterRegister(index);
     return DefineFixed(result, reg);
@@ -2478,6 +2496,7 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
 LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
   HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->arguments_count(),
@@ -2538,7 +2557,9 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegister(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }
 
 } }  // namespace v8::internal

69
deps/v8/src/arm/lithium-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_LITHIUM_ARM_H_
 #define V8_ARM_LITHIUM_ARM_H_
@@ -97,6 +74,7 @@ class LCodeGen;
   V(DummyUse)                                \
   V(FlooringDivByConstI)                     \
   V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
   V(ForInCacheArray)                         \
   V(ForInPrepareMap)                         \
   V(FunctionLiteral)                         \
@@ -261,7 +239,9 @@ class LInstruction : public ZoneObject {
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
-  virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return IsCall(); }
@@ -713,14 +693,14 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
-    inputs_[0] = left;
-    inputs_[1] = right;
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
     temps_[0] = temp;
   }
 
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
   LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
@@ -767,6 +747,23 @@ class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
 };
 
+class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
 class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulI(LOperand* left, LOperand* right) {
@@ -1968,7 +1965,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
@@ -2164,7 +2161,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
-  Handle<Map> transition() const { return hydrogen()->transition_map(); }
   Representation representation() const {
     return hydrogen()->field_representation();
   }
@@ -2379,7 +2375,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
 class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
-  explicit LCheckMaps(LOperand* value) {
+  explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
   }
@@ -2696,6 +2692,8 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
         next_block_(NULL),
         allocator_(allocator) { }
 
+  Isolate* isolate() const { return graph_->isolate(); }
+
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -2722,12 +2720,13 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
   LInstruction* DoMathClz32(HUnaryMathOperation* instr);
   LInstruction* DoDivByPowerOf2I(HDiv* instr);
   LInstruction* DoDivByConstI(HDiv* instr);
-  LInstruction* DoDivI(HBinaryOperation* instr);
+  LInstruction* DoDivI(HDiv* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
   LInstruction* DoModByConstI(HMod* instr);
   LInstruction* DoModI(HMod* instr);
   LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
   enum Status {

412
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -86,13 +63,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
   PopulateDeoptimizationData(code);
-  info()->CommitDependencies(code);
-}
-
-
-void LCodeGen::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
 }
@@ -207,7 +177,7 @@ bool LCodeGen::GeneratePrologue() {
       Comment(";;; Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
+        FastNewContextStub stub(isolate(), heap_slots);
         __ CallStub(&stub);
       } else {
         __ push(r1);
@@ -714,6 +684,16 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
 }
 
+int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
+  int size = masm()->CallSize(code, mode);
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
+  }
+  return size;
+}
+
 void LCodeGen::CallCode(Handle<Code> code,
                         RelocInfo::Mode mode,
                         LInstruction* instr,
@@ -783,6 +763,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                     Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     //   -x ............. -4  0 ..................................... y
@@ -906,7 +887,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
   Handle<DeoptimizationInputData> data =
-      factory()->NewDeoptimizationInputData(length, TENURED);
+      DeoptimizationInputData::New(isolate(), length, TENURED);
   Handle<ByteArray> translations =
       translations_.CreateByteArray(isolate()->factory());
@@ -1095,18 +1076,18 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
-      RegExpExecStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::SubString: {
-      SubStringStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
     }
    case CodeStub::StringCompare: {
-      StringCompareStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+      StringCompareStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     default:
@@ -1293,7 +1274,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
   ASSERT(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
@ -1363,15 +1344,16 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
} }
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) { void LCodeGen::DoDivI(LDivI* instr) {
HBinaryOperation* hdiv = instr->hydrogen(); HBinaryOperation* hdiv = instr->hydrogen();
Register left = ToRegister(instr->left()); Register dividend = ToRegister(instr->dividend());
Register right = ToRegister(instr->right()); Register divisor = ToRegister(instr->divisor());
Register result = ToRegister(instr->result()); Register result = ToRegister(instr->result());
// Check for x / 0. // Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero()); __ cmp(divisor, Operand::Zero());
DeoptimizeIf(eq, instr->environment()); DeoptimizeIf(eq, instr->environment());
} }
@ -1380,10 +1362,10 @@ void LCodeGen::DoDivI(LDivI* instr) {
Label positive; Label positive;
if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
// Do the test only if it hadn't be done above. // Do the test only if it hadn't be done above.
__ cmp(right, Operand::Zero()); __ cmp(divisor, Operand::Zero());
} }
__ b(pl, &positive); __ b(pl, &positive);
__ cmp(left, Operand::Zero()); __ cmp(dividend, Operand::Zero());
DeoptimizeIf(eq, instr->environment()); DeoptimizeIf(eq, instr->environment());
__ bind(&positive); __ bind(&positive);
} }
@@ -1394,39 +1376,30 @@ void LCodeGen::DoDivI(LDivI* instr) {
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
     // We don't need to check for overflow when truncating with sdiv
     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
-    __ cmp(left, Operand(kMinInt));
-    __ cmp(right, Operand(-1), eq);
+    __ cmp(dividend, Operand(kMinInt));
+    __ cmp(divisor, Operand(-1), eq);
     DeoptimizeIf(eq, instr->environment());
   }

   if (CpuFeatures::IsSupported(SUDIV)) {
     CpuFeatureScope scope(masm(), SUDIV);
-    __ sdiv(result, left, right);
+    __ sdiv(result, dividend, divisor);
   } else {
     DoubleRegister vleft = ToDoubleRegister(instr->temp());
     DoubleRegister vright = double_scratch0();
-    __ vmov(double_scratch0().low(), left);
+    __ vmov(double_scratch0().low(), dividend);
     __ vcvt_f64_s32(vleft, double_scratch0().low());
-    __ vmov(double_scratch0().low(), right);
+    __ vmov(double_scratch0().low(), divisor);
     __ vcvt_f64_s32(vright, double_scratch0().low());
     __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
     __ vcvt_s32_f64(double_scratch0().low(), vleft);
     __ vmov(result, double_scratch0().low());
   }

-  if (hdiv->IsMathFloorOfDiv()) {
-    Label done;
-    Register remainder = scratch0();
-    __ mls(remainder, result, right, left);
-    __ cmp(remainder, Operand::Zero());
-    __ b(eq, &done);
-    __ eor(remainder, remainder, Operand(right));
-    __ add(result, result, Operand(remainder, ASR, 31));
-    __ bind(&done);
-  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Compute remainder and deopt if it's not zero.
     Register remainder = scratch0();
-    __ mls(remainder, result, right, left);
+    __ mls(remainder, result, divisor, dividend);
     __ cmp(remainder, Operand::Zero());
     DeoptimizeIf(ne, instr->environment());
   }
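Aside: the rewritten DoDivI leans on ARM sdiv semantics. The instruction truncates toward zero and defines sdiv kMinInt, -1 as kMinInt, so the only extra work is deoptimizing on x / 0, on (0 / -x) when minus zero matters, on kMinInt / -1 when the result escapes as a real int32, and on a non-zero remainder when not all uses truncate. A minimal C++ sketch of those checks (an illustration under stated assumptions, not V8 code; *deopt stands in for DeoptimizeIf):

    #include <cstdint>
    #include <limits>

    // Sketch of the deopt conditions DoDivI emits around a truncating
    // 32-bit division. Returning the quotient models the result register.
    int32_t TruncatingDiv(int32_t dividend, int32_t divisor,
                          bool all_uses_truncate, bool* deopt) {
      const int32_t kMinInt = std::numeric_limits<int32_t>::min();
      if (divisor == 0) { *deopt = true; return 0; }  // x / 0
      // (0 / -x) would be -0.0 as a double; the real code only checks this
      // under kBailoutOnMinusZero.
      if (dividend == 0 && divisor < 0) { *deopt = true; return 0; }
      if (dividend == kMinInt && divisor == -1) {
        // sdiv wraps this to kMinInt; that is only acceptable if every use
        // truncates to int32 anyway.  (Guarded here because it is UB in C++.)
        if (!all_uses_truncate) *deopt = true;
        return kMinInt;
      }
      int32_t quotient = dividend / divisor;  // truncates, like sdiv
      if (!all_uses_truncate && dividend % divisor != 0) *deopt = true;
      return quotient;
    }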
@@ -1476,19 +1449,21 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     DeoptimizeIf(eq, instr->environment());
   }
-  if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-    // Note that we could emit branch-free code, but that would need one more
-    // register.
-    if (divisor == -1) {
-      DeoptimizeIf(vs, instr->environment());
-      __ mov(result, Operand(dividend, ASR, shift));
-    } else {
-      __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
-      __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
-    }
-  } else {
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
     __ mov(result, Operand(dividend, ASR, shift));
+    return;
   }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    DeoptimizeIf(vs, instr->environment());
+    return;
+  }
+
+  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+  __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
 }
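The restructure above also documents the arithmetic: once the dividend has been negated for a negative divisor (setting the overflow flag), dividing by a power of two is a single arithmetic shift, because ASR rounds toward negative infinity, which is exactly flooring division. A small standalone C++ illustration (assumes arithmetic right shift of negative values, as on ARM; this is implementation-defined before C++20):

    #include <cassert>
    #include <cstdint>

    // ASR by `shift` is a flooring division by 2^shift on two's-complement
    // values, which is why the non-overflow path is one mov with ASR.
    int32_t FlooringDivByPowerOf2(int32_t dividend, int shift) {
      assert(shift >= 0 && shift < 31);
      return dividend >> shift;  // arithmetic shift assumed
    }

    // FlooringDivByPowerOf2(-7, 1) == -4 (floor of -3.5),
    // whereas truncating division gives -7 / 2 == -3.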
@@ -1538,6 +1513,69 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
 }


+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register left = ToRegister(instr->dividend());
+  Register right = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmp(right, Operand::Zero());
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive;
+    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+      // Do the test only if it hadn't be done above.
+      __ cmp(right, Operand::Zero());
+    }
+    __ b(pl, &positive);
+    __ cmp(left, Operand::Zero());
+    DeoptimizeIf(eq, instr->environment());
+    __ bind(&positive);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      (!CpuFeatures::IsSupported(SUDIV) ||
+       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    // We don't need to check for overflow when truncating with sdiv
+    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+    __ cmp(left, Operand(kMinInt));
+    __ cmp(right, Operand(-1), eq);
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm(), SUDIV);
+    __ sdiv(result, left, right);
+  } else {
+    DoubleRegister vleft = ToDoubleRegister(instr->temp());
+    DoubleRegister vright = double_scratch0();
+    __ vmov(double_scratch0().low(), left);
+    __ vcvt_f64_s32(vleft, double_scratch0().low());
+    __ vmov(double_scratch0().low(), right);
+    __ vcvt_f64_s32(vright, double_scratch0().low());
+    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
+    __ vcvt_s32_f64(double_scratch0().low(), vleft);
+    __ vmov(result, double_scratch0().low());
+  }
+
+  Label done;
+  Register remainder = scratch0();
+  __ mls(remainder, result, right, left);
+  __ cmp(remainder, Operand::Zero());
+  __ b(eq, &done);
+  __ eor(remainder, remainder, Operand(right));
+  __ add(result, result, Operand(remainder, ASR, 31));
+  __ bind(&done);
+}
+
+
 void LCodeGen::DoMulI(LMulI* instr) {
   Register result = ToRegister(instr->result());
   // Note that result may alias left.
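The tail of DoFlooringDivI is a branch-light floor correction on top of a truncating divide: mls recomputes the remainder, and when it is non-zero, eor with the divisor puts "signs differ" into the sign bit, which ASR #31 smears into 0 or -1 to adjust the quotient. A hedged C++ sketch of that fix-up (assumes arithmetic right shift, as on ARM):

    #include <cstdint>

    // Convert a truncated quotient into a flooring one, mirroring the
    // eor/ASR#31 sequence above. remainder ^ divisor is negative exactly
    // when the two disagree in sign; >> 31 turns that into 0 or -1.
    int32_t FloorFromTruncated(int32_t quotient, int32_t remainder,
                               int32_t divisor) {
      if (remainder == 0) return quotient;  // the __ b(eq, &done) path
      return quotient + ((remainder ^ divisor) >> 31);
    }

    // Example: -7 / 2 -> sdiv gives -3 with remainder -1;
    // (-1 ^ 2) >> 31 == -1, so the floored quotient is -4.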
@@ -1835,9 +1873,16 @@ void LCodeGen::DoConstantE(LConstantE* instr) {
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  Handle<Object> value = instr->value(isolate());
+  Handle<Object> object = instr->value(isolate());
   AllowDeferredHandleDereference smi_check;
-  __ Move(ToRegister(instr->result()), value);
+  if (instr->hydrogen()->HasObjectMap()) {
+    Handle<Map> object_map = instr->hydrogen()->ObjectMap().handle();
+    ASSERT(object->IsHeapObject());
+    ASSERT(!object_map->is_stable() ||
+           *object_map == Handle<HeapObject>::cast(object)->map());
+    USE(object_map);
+  }
+  __ Move(ToRegister(instr->result()), object);
 }
@@ -2091,11 +2136,11 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   ASSERT(ToRegister(instr->right()).is(r0));
   ASSERT(ToRegister(instr->result()).is(r0));

-  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
+  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
   // Block literal pool emission to ensure nop indicating no inlined smi code
   // is in the correct position.
   Assembler::BlockConstPoolScope block_const_pool(masm());
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -2686,8 +2731,8 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
   ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.

-  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

   __ cmp(r0, Operand::Zero());
   __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
@@ -2783,7 +2828,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
       flags | InstanceofStub::kCallSiteInlineCheck);
   flags = static_cast<InstanceofStub::Flags>(
       flags | InstanceofStub::kReturnTrueFalseObject);
-  InstanceofStub stub(flags);
+  InstanceofStub stub(isolate(), flags);

   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   LoadContextFromDeferred(instr->context());
@@ -2805,7 +2850,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
     ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
     __ nop();
   }
-  CallCodeGeneric(stub.GetCode(isolate()),
+  CallCodeGeneric(stub.GetCode(),
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -3309,7 +3354,8 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
     __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
   } else {
     ASSERT_EQ(-1, shift_size);
-    __ add(scratch0(), scratch0(), Operand(key, LSR, 1));
+    // key can be negative, so using ASR here.
+    __ add(scratch0(), scratch0(), Operand(key, ASR, 1));
   }
   return MemOperand(base, scratch0());
 }
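The one-line PrepareKeyedOperand fix matters because a smi key is the index shifted left by one, so untagging must be an arithmetic shift: a logical shift zero-fills the sign bit and turns a negative key into a huge positive offset. Standalone illustration (32-bit smis assumed):

    #include <cstdint>

    int32_t SmiUntagAsr(int32_t tagged) {
      return tagged >> 1;  // arithmetic: keeps the sign
    }
    uint32_t SmiUntagLsr(int32_t tagged) {
      return static_cast<uint32_t>(tagged) >> 1;  // logical: zero-fills
    }

    // For key -2 (tagged as -4): SmiUntagAsr(-4) == -2, but
    // SmiUntagLsr(-4) == 0x7FFFFFFE, a bogus element offset.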
@@ -3801,7 +3847,7 @@ void LCodeGen::DoPower(LPower* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));

   if (exponent_type.IsSmi()) {
-    MathPowStub stub(MathPowStub::TAGGED);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;

@@ -3811,14 +3857,14 @@ void LCodeGen::DoPower(LPower* instr) {
     __ cmp(r6, Operand(ip));
     DeoptimizeIf(ne, instr->environment());
     __ bind(&no_deopt);
-    MathPowStub stub(MathPowStub::TAGGED);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsInteger32()) {
-    MathPowStub stub(MathPowStub::INTEGER);
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
     __ CallStub(&stub);
   } else {
     ASSERT(exponent_type.IsDouble());
-    MathPowStub stub(MathPowStub::DOUBLE);
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
 }
@@ -3925,8 +3971,8 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));

   int arity = instr->arity();
-  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -3938,8 +3984,8 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
   __ mov(r0, Operand(instr->arity()));
   // No cell in r2 for construct type feedback in optimized code
   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
 }
@@ -3957,8 +4003,8 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
       : DONT_OVERRIDE;

   if (instr->arity() == 0) {
-    ArrayNoArgumentConstructorStub stub(kind, override_mode);
-    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   } else if (instr->arity() == 1) {
     Label done;
     if (IsFastPackedElementsKind(kind)) {

@@ -3970,18 +4016,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
       __ b(eq, &packed_case);

       ElementsKind holey_kind = GetHoleyElementsKind(kind);
-      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
-      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
       __ jmp(&done);
       __ bind(&packed_case);
     }

-    ArraySingleArgumentConstructorStub stub(kind, override_mode);
-    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
     __ bind(&done);
   } else {
-    ArrayNArgumentsConstructorStub stub(kind, override_mode);
-    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   }
 }
@@ -4028,7 +4076,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
     return;
   }

-  Handle<Map> transition = instr->transition();
   SmiCheck check_needed =
       instr->hydrogen()->value()->IsHeapObject()
           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -4042,19 +4089,21 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
       __ SmiTst(value);
       DeoptimizeIf(eq, instr->environment());

-      // We know that value is a smi now, so we can omit the check below.
+      // We know now that value is not a smi, so we can omit the check below.
       check_needed = OMIT_SMI_CHECK;
     }
   } else if (representation.IsDouble()) {
-    ASSERT(transition.is_null());
     ASSERT(access.IsInobject());
+    ASSERT(!instr->hydrogen()->has_transition());
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     DwVfpRegister value = ToDoubleRegister(instr->value());
     __ vstr(value, FieldMemOperand(object, offset));
     return;
   }

-  if (!transition.is_null()) {
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
     __ mov(scratch, Operand(transition));
     __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
@@ -4119,38 +4168,29 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
 }


-void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
-  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+  if (instr->index()->IsConstantOperand()) {
+    Operand index = ToOperand(instr->index());
+    Register length = ToRegister(instr->length());
+    __ cmp(length, index);
+    cc = ReverseCondition(cc);
+  } else {
+    Register index = ToRegister(instr->index());
+    Operand length = ToOperand(instr->length());
+    __ cmp(index, length);
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
     Label done;
-    __ b(NegateCondition(condition), &done);
+    __ b(NegateCondition(cc), &done);
     __ stop("eliminated bounds check failed");
     __ bind(&done);
   } else {
-    DeoptimizeIf(condition, check->environment());
+    DeoptimizeIf(cc, instr->environment());
   }
 }


-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  if (instr->hydrogen()->skip_check()) return;
-
-  if (instr->index()->IsConstantOperand()) {
-    int constant_index =
-        ToInteger32(LConstantOperand::cast(instr->index()));
-    if (instr->hydrogen()->length()->representation().IsSmi()) {
-      __ mov(ip, Operand(Smi::FromInt(constant_index)));
-    } else {
-      __ mov(ip, Operand(constant_index));
-    }
-    __ cmp(ip, ToRegister(instr->length()));
-  } else {
-    __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
-  }
-  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
-  ApplyCheckIf(condition, instr);
-}
-
-
 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   Register external_pointer = ToRegister(instr->elements());
   Register key = no_reg;
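The merged DoBoundsCheck picks the comparison direction from whichever operand is a constant and fixes the condition up with ReverseCondition (hi <-> lo, hs <-> ls), so both forms deopt on the same predicate; using unsigned conditions also rejects negative indices for free. A sketch of the predicate (illustration only, not V8 code):

    #include <cstdint>

    // cmp(index, length) followed by a deopt on hi (allow_equality) or hs.
    // Unsigned comparison makes a negative index look enormous, so it fails.
    bool BoundsCheckFails(int32_t index, int32_t length, bool allow_equality) {
      uint32_t i = static_cast<uint32_t>(index);
      uint32_t n = static_cast<uint32_t>(length);
      return allow_equality ? (i > n) : (i >= n);
    }

    // cmp(length, index) with ReverseCondition(cc) tests the same thing with
    // the operands swapped, which is what the constant-index branch emits.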
@@ -4381,15 +4421,15 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
         scratch, GetLinkRegisterState(), kDontSaveFPRegs);
   } else {
     ASSERT(ToRegister(instr->context()).is(cp));
+    ASSERT(object_reg.is(r0));
     PushSafepointRegistersScope scope(
         this, Safepoint::kWithRegistersAndDoubles);
-    __ Move(r0, object_reg);
     __ Move(r1, to_map);
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
-    TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
     __ CallStub(&stub);
     RecordSafepointWithRegistersAndDoubles(
-        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
   __ bind(&not_applicable);
 }
@@ -4409,9 +4449,10 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
   ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->left()).is(r1));
   ASSERT(ToRegister(instr->right()).is(r0));
-  StringAddStub stub(instr->hydrogen()->flags(),
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -5106,7 +5147,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
     Register object_;
   };

-  if (instr->hydrogen()->CanOmitMapChecks()) return;
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }

   Register map_reg = scratch0();
   LOperand* input = instr->value();
@@ -5116,22 +5164,22 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

   DeferredCheckMaps* deferred = NULL;
-  if (instr->hydrogen()->has_migration_target()) {
+  if (instr->hydrogen()->HasMigrationTarget()) {
     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
     __ bind(deferred->check_maps());
   }

-  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   Label success;
-  for (int i = 0; i < map_set.size() - 1; i++) {
-    Handle<Map> map = map_set.at(i).handle();
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
     __ CompareMap(map_reg, map, &success);
     __ b(eq, &success);
   }

-  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
   __ CompareMap(map_reg, map, &success);
-  if (instr->hydrogen()->has_migration_target()) {
+  if (instr->hydrogen()->HasMigrationTarget()) {
     __ b(ne, deferred->entry());
   } else {
     DeoptimizeIf(ne, instr->environment());
@@ -5301,7 +5349,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
     __ push(size);
   } else {
     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-    __ Push(Smi::FromInt(size));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ Push(Smi::FromInt(size));
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
   }

   int flags = AllocateDoubleAlignFlag::encode(
@@ -5381,10 +5435,11 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
+    FastNewClosureStub stub(isolate(),
+                            instr->hydrogen()->strict_mode(),
                             instr->hydrogen()->is_generator());
     __ mov(r2, Operand(instr->hydrogen()->shared_info()));
-    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
     __ mov(r2, Operand(instr->hydrogen()->shared_info()));
     __ mov(r1, Operand(pretenure ? factory()->true_value()
@@ -5421,13 +5476,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                  Handle<String> type_name) {
   Condition final_branch_condition = kNoCondition;
   Register scratch = scratch0();
-  if (type_name->Equals(heap()->number_string())) {
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
     __ JumpIfSmi(input, true_label);
     __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
     __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
     final_branch_condition = eq;

-  } else if (type_name->Equals(heap()->string_string())) {
+  } else if (String::Equals(type_name, factory->string_string())) {
     __ JumpIfSmi(input, false_label);
     __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
     __ b(ge, false_label);
@@ -5435,22 +5491,23 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
     __ tst(scratch, Operand(1 << Map::kIsUndetectable));
     final_branch_condition = eq;

-  } else if (type_name->Equals(heap()->symbol_string())) {
+  } else if (String::Equals(type_name, factory->symbol_string())) {
     __ JumpIfSmi(input, false_label);
     __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
     final_branch_condition = eq;

-  } else if (type_name->Equals(heap()->boolean_string())) {
+  } else if (String::Equals(type_name, factory->boolean_string())) {
     __ CompareRoot(input, Heap::kTrueValueRootIndex);
     __ b(eq, true_label);
     __ CompareRoot(input, Heap::kFalseValueRootIndex);
     final_branch_condition = eq;

-  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
+  } else if (FLAG_harmony_typeof &&
+             String::Equals(type_name, factory->null_string())) {
     __ CompareRoot(input, Heap::kNullValueRootIndex);
     final_branch_condition = eq;

-  } else if (type_name->Equals(heap()->undefined_string())) {
+  } else if (String::Equals(type_name, factory->undefined_string())) {
     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
     __ b(eq, true_label);
     __ JumpIfSmi(input, false_label);

@@ -5460,7 +5517,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
     __ tst(scratch, Operand(1 << Map::kIsUndetectable));
     final_branch_condition = ne;

-  } else if (type_name->Equals(heap()->function_string())) {
+  } else if (String::Equals(type_name, factory->function_string())) {
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     Register type_reg = scratch;
     __ JumpIfSmi(input, false_label);

@@ -5469,7 +5526,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
     __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
     final_branch_condition = eq;

-  } else if (type_name->Equals(heap()->object_string())) {
+  } else if (String::Equals(type_name, factory->object_string())) {
     Register map = scratch;
     __ JumpIfSmi(input, false_label);
     if (!FLAG_harmony_typeof) {
@@ -5607,12 +5664,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
     __ cmp(sp, Operand(ip));
     __ b(hs, &done);
-    PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+    PredictableCodeSizeScope predictable(masm(),
+        CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
     ASSERT(instr->context()->IsRegister());
     ASSERT(ToRegister(instr->context()).is(cp));
-    CallCode(isolate()->builtins()->StackCheck(),
-             RelocInfo::CODE_TARGET,
-             instr);
+    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
     ASSERT(instr->hydrogen()->is_backwards_branch());
@@ -5716,13 +5773,61 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
 }


+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ Push(object);
+  __ Push(index);
+  __ mov(cp, Operand::Zero());
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() V8_OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
   Register object = ToRegister(instr->object());
   Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);

   Label out_of_object, done;
+
+  __ tst(index, Operand(Smi::FromInt(1)));
+  __ b(ne, deferred->entry());
+  __ mov(index, Operand(index, ASR, 1));
+
   __ cmp(index, Operand::Zero());
   __ b(lt, &out_of_object);

@@ -5738,6 +5843,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
   __ ldr(result, FieldMemOperand(scratch,
                                  FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
   __ bind(&done);
 }

38
deps/v8/src/arm/lithium-codegen-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
 #define V8_ARM_LITHIUM_CODEGEN_ARM_H_
@@ -35,7 +12,7 @@
 #include "lithium-codegen.h"
 #include "safepoint-table.h"
 #include "scopes.h"
-#include "v8utils.h"
+#include "utils.h"

 namespace v8 {
 namespace internal {
@@ -141,6 +118,10 @@ class LCodeGen: public LCodeGenBase {
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);

   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -182,8 +163,6 @@ class LCodeGen: public LCodeGenBase {
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

-  void Abort(BailoutReason reason);
-
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

   void SaveCallerDoubles();
@@ -205,6 +184,8 @@ class LCodeGen: public LCodeGenBase {
     RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
   };

+  int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode);
+
   void CallCode(
       Handle<Code> code,
       RelocInfo::Mode mode,
@@ -258,7 +239,6 @@ class LCodeGen: public LCodeGenBase {
                     LEnvironment* environment,
                     Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition condition, LEnvironment* environment);
-  void ApplyCheckIf(Condition condition, LBoundsCheck* check);

   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,

98
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "v8.h"
@@ -33,11 +10,22 @@
 namespace v8 {
 namespace internal {

-static const Register kSavedValueRegister = { 9 };
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list and using
+// the root register has two advantages:
+//  - It is not in crankshaft allocatable registers list, so it can't interfere
+//    with any of the moves we are resolving.
+//  - We don't need to push it on the stack, as we can reload it with its value
+//    once we have resolved a cycle.
+#define kSavedValueRegister kRootRegister

 LGapResolver::LGapResolver(LCodeGen* owner)
     : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
-      saved_destination_(NULL) { }
+      saved_destination_(NULL), need_to_restore_root_(false) { }
+
+#define __ ACCESS_MASM(cgen_->masm())

 void LGapResolver::Resolve(LParallelMove* parallel_move) {

@@ -67,6 +55,12 @@ void LGapResolver::Resolve(LParallelMove* parallel_move) {
     }
   }

+  if (need_to_restore_root_) {
+    ASSERT(kSavedValueRegister.is(kRootRegister));
+    __ InitializeRootRegister();
+    need_to_restore_root_ = false;
+  }
+
   moves_.Rewind(0);
 }
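The hunk above changes the spill strategy for cycle-breaking: instead of permanently reserving r9, the resolver borrows the root register (not allocatable by crankshaft, so no pending move can touch it) and, because the root pointer is a per-isolate constant, simply re-derives it with InitializeRootRegister() once the whole parallel move is done. A schematic of that flow (pseudo-API sketch under those assumptions; the real code manipulates LOperands and emits ARM instructions):

    // Schematic only: models when need_to_restore_root_ is set and cleared.
    struct GapResolverSketch {
      bool need_to_restore_root_ = false;

      void SpillToRoot() {
        // A mov/ldr of the cycle value into kRootRegister happens here.
        need_to_restore_root_ = true;
      }

      void Resolve() {
        // ... emit moves, calling SpillToRoot() when a cycle is broken
        //     through a core register or stack slot ...
        if (need_to_restore_root_) {
          // Reload instead of save/restore: the root pointer is constant
          // for the isolate, so InitializeRootRegister() suffices.
          need_to_restore_root_ = false;
        }
      }
    };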
@@ -155,20 +149,21 @@ void LGapResolver::Verify() {
 #endif
 }

-#define __ ACCESS_MASM(cgen_->masm())

 void LGapResolver::BreakCycle(int index) {
-  // We save in a register the value that should end up in the source of
-  // moves_[root_index]. After performing all moves in the tree rooted
-  // in that move, we save the value to that source.
+  // We save in a register the source of that move and we remember its
+  // destination. Then we mark this move as resolved so the cycle is
+  // broken and we can perform the other moves.
   ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
   ASSERT(!in_cycle_);
   in_cycle_ = true;
   LOperand* source = moves_[index].source();
   saved_destination_ = moves_[index].destination();
   if (source->IsRegister()) {
+    need_to_restore_root_ = true;
     __ mov(kSavedValueRegister, cgen_->ToRegister(source));
   } else if (source->IsStackSlot()) {
+    need_to_restore_root_ = true;
     __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
     __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
@@ -186,7 +181,6 @@ void LGapResolver::RestoreValue() {
   ASSERT(in_cycle_);
   ASSERT(saved_destination_ != NULL);

-  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
   if (saved_destination_->IsRegister()) {
     __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
   } else if (saved_destination_->IsStackSlot()) {
@@ -226,20 +220,15 @@ void LGapResolver::EmitMove(int index) {
     } else {
       ASSERT(destination->IsStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      if (in_cycle_) {
-        if (!destination_operand.OffsetIsUint12Encodable()) {
-          // ip is overwritten while saving the value to the destination.
-          // Therefore we can't use ip.  It is OK if the read from the source
-          // destroys ip, since that happens before the value is read.
-          __ vldr(kScratchDoubleReg.low(), source_operand);
-          __ vstr(kScratchDoubleReg.low(), destination_operand);
-        } else {
-          __ ldr(ip, source_operand);
-          __ str(ip, destination_operand);
-        }
+      if (!destination_operand.OffsetIsUint12Encodable()) {
+        // ip is overwritten while saving the value to the destination.
+        // Therefore we can't use ip.  It is OK if the read from the source
+        // destroys ip, since that happens before the value is read.
+        __ vldr(kScratchDoubleReg.low(), source_operand);
+        __ vstr(kScratchDoubleReg.low(), destination_operand);
       } else {
-        __ ldr(kSavedValueRegister, source_operand);
-        __ str(kSavedValueRegister, destination_operand);
+        __ ldr(ip, source_operand);
+        __ str(ip, destination_operand);
       }
     }
@@ -261,14 +250,14 @@ void LGapResolver::EmitMove(int index) {
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      need_to_restore_root_ = true;
       Representation r = cgen_->IsSmi(constant_source)
           ? Representation::Smi() : Representation::Integer32();
       if (cgen_->IsInteger32(constant_source)) {
         __ mov(kSavedValueRegister,
                Operand(cgen_->ToRepresentation(constant_source, r)));
       } else {
-        __ Move(kSavedValueRegister,
-                cgen_->ToHandle(constant_source));
+        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
       }
       __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
     }
@@ -290,16 +279,11 @@ void LGapResolver::EmitMove(int index) {
       ASSERT(destination->IsDoubleStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
-        // kSavedDoubleValueRegister was used to break the cycle,
-        // but kSavedValueRegister is free.
-        MemOperand source_high_operand =
-            cgen_->ToHighMemOperand(source);
-        MemOperand destination_high_operand =
-            cgen_->ToHighMemOperand(destination);
-        __ ldr(kSavedValueRegister, source_operand);
-        __ str(kSavedValueRegister, destination_operand);
-        __ ldr(kSavedValueRegister, source_high_operand);
-        __ str(kSavedValueRegister, destination_high_operand);
+        // kScratchDoubleReg was used to break the cycle.
+        __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);
+        __ vldr(kScratchDoubleReg, source_operand);
+        __ vstr(kScratchDoubleReg, destination_operand);
+        __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);
       } else {
         __ vldr(kScratchDoubleReg, source_operand);
         __ vstr(kScratchDoubleReg, destination_operand);

31
deps/v8/src/arm/lithium-gap-resolver-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
 #define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
@@ -76,6 +53,10 @@ class LGapResolver V8_FINAL BASE_EMBEDDED {
   int root_index_;
   bool in_cycle_;
   LOperand* saved_destination_;
+
+  // We use the root register as a scratch in a few places. When that happens,
+  // this flag is set to indicate that it needs to be restored.
+  bool need_to_restore_root_;
 };

 } }  // namespace v8::internal

153
deps/v8/src/arm/macro-assembler-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include <limits.h>  // For LONG_MIN, LONG_MAX.
@@ -100,19 +77,31 @@ int MacroAssembler::CallSize(
   int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
   intptr_t immediate = reinterpret_cast<intptr_t>(target);
-  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
+  if (!Operand(immediate, rmode).is_single_instruction(isolate(),
+                                                       this,
+                                                       mov_instr)) {
     size += kInstrSize;
   }
   return size;
 }


-int MacroAssembler::CallSizeNotPredictableCodeSize(
-    Address target, RelocInfo::Mode rmode, Condition cond) {
+int MacroAssembler::CallStubSize(
+    CodeStub* stub, TypeFeedbackId ast_id, Condition cond) {
+  return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
+                                                   Address target,
+                                                   RelocInfo::Mode rmode,
+                                                   Condition cond) {
   int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
   intptr_t immediate = reinterpret_cast<intptr_t>(target);
-  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
+  if (!Operand(immediate, rmode).is_single_instruction(isolate,
+                                                       NULL,
+                                                       mov_instr)) {
     size += kInstrSize;
   }
   return size;
@@ -272,11 +261,11 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
 void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
                          Condition cond) {
   if (!src2.is_reg() &&
-      !src2.must_output_reloc_info(this) &&
+      !src2.must_output_reloc_info(isolate(), this) &&
       src2.immediate() == 0) {
     mov(dst, Operand::Zero(), LeaveCC, cond);
-  } else if (!src2.is_single_instruction(this) &&
-             !src2.must_output_reloc_info(this) &&
+  } else if (!src2.is_single_instruction(isolate(), this) &&
+             !src2.must_output_reloc_info(isolate(), this) &&
              CpuFeatures::IsSupported(ARMv7) &&
             IsPowerOf2(src2.immediate() + 1)) {
     ubfx(dst, src1, 0,
@@ -549,7 +538,8 @@ void MacroAssembler::RecordWrite(Register object,
   if (lr_status == kLRHasNotBeenSaved) {
     push(lr);
   }
-  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+                       fp_mode);
   CallStub(&stub);
   if (lr_status == kLRHasNotBeenSaved) {
     pop(lr);
@@ -598,7 +588,7 @@ void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
   }
   push(lr);
   StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(fp_mode);
+      StoreBufferOverflowStub(isolate(), fp_mode);
   CallStub(&store_buffer_overflow);
   pop(lr);
   bind(&done);
@@ -650,7 +640,7 @@ void MacroAssembler::PopSafepointRegisters() {
 void MacroAssembler::PushSafepointRegistersAndDoubles() {
   // Number of d-regs not known at snapshot time.
-  ASSERT(!Serializer::enabled());
+  ASSERT(!Serializer::enabled(isolate()));
   PushSafepointRegisters();
   // Only save allocatable registers.
   ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
@@ -664,7 +654,7 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
 void MacroAssembler::PopSafepointRegistersAndDoubles() {
   // Number of d-regs not known at snapshot time.
-  ASSERT(!Serializer::enabled());
+  ASSERT(!Serializer::enabled(isolate()));
   // Only save allocatable registers.
   ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
   ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
@@ -706,7 +696,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // Number of d-regs not known at snapshot time.
-  ASSERT(!Serializer::enabled());
+  ASSERT(!Serializer::enabled(isolate()));
   // General purpose registers are pushed last on the stack.
   int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -789,6 +779,14 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
   // If needed, restore wanted bits of FPSCR.
   Label fpscr_done;
   vmrs(scratch);
+  if (emit_debug_code()) {
+    Label rounding_mode_correct;
+    tst(scratch, Operand(kVFPRoundingModeMask));
+    b(eq, &rounding_mode_correct);
+    // Don't call Assert here, since Runtime_Abort could re-enter here.
+    stop("Default rounding mode not set");
+    bind(&rounding_mode_correct);
+  }
   tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
   b(ne, &fpscr_done);
   orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
@@ -912,7 +910,7 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
     add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
   } else {
     PredictableCodeSizeScope predictible_code_size_scope(
-        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+        this, kNoCodeAgeSequenceLength);
     // The following three instructions must remain together and unmodified
     // for code aging to work properly.
     if (isolate()->IsCodePreAgingActive()) {
@@ -989,7 +987,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   }
   if (FLAG_enable_ool_constant_pool) {
     str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
-    LoadConstantPoolPointerRegister();
   }
   mov(ip, Operand(CodeObject()));
   str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -1321,15 +1318,13 @@ void MacroAssembler::IsObjectNameType(Register object,
 }


-#ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   mov(r0, Operand::Zero());
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
-  CEntryStub ces(1);
+  CEntryStub ces(isolate(), 1);
   ASSERT(AllowThisStubCall(&ces));
-  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
-#endif


 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
@@ -1755,7 +1750,7 @@ void MacroAssembler::Allocate(int object_size,
       object_size -= bits;
       shift += 8;
       Operand bits_operand(bits);
-      ASSERT(bits_operand.is_single_instruction(this));
+      ASSERT(bits_operand.is_single_instruction(isolate(), this));
       add(scratch2, source, bits_operand, SetCC, cond);
       source = scratch2;
       cond = cc;
@@ -2305,12 +2300,12 @@ void MacroAssembler::CallStub(CodeStub* stub,
                               TypeFeedbackId ast_id,
                               Condition cond) {
   ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
 }


 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
@@ -2339,10 +2334,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
   Label profiler_disabled;
   Label end_profiler_check;
-  bool* is_profiling_flag =
-      isolate()->cpu_profiler()->is_profiling_address();
-  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
-  mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
+  mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
   ldrb(r9, MemOperand(r9, 0));
   cmp(r9, Operand(0));
   b(eq, &profiler_disabled);
@@ -2375,7 +2367,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
   // Native call returns to the DirectCEntry stub which redirects to the
   // return address pushed on stack (could have moved after GC).
   // DirectCEntry stub itself is generated early and never moves.
-  DirectCEntryStub stub;
+  DirectCEntryStub stub(isolate());
   stub.GenerateCall(this, r3);
 
   if (FLAG_log_timer_events) {
@@ -2455,14 +2447,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
 }
 
 
-void MacroAssembler::IllegalOperation(int num_arguments) {
-  if (num_arguments > 0) {
-    add(sp, sp, Operand(num_arguments * kPointerSize));
-  }
-  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-}
-
-
 void MacroAssembler::IndexFromHash(Register hash, Register index) {
   // If the hash field contains an array index pick it out. The assert checks
   // that the constants for the maximum number of digits for an array index
@@ -2580,7 +2564,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
   sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
   vstr(double_input, MemOperand(sp, 0));
 
-  DoubleToIStub stub(sp, result, 0, true, true);
+  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
   CallStub(&stub);
 
   add(sp, sp, Operand(kDoubleSize));
@@ -2602,7 +2586,8 @@ void MacroAssembler::TruncateHeapNumberToI(Register result,
   // If we fell through then inline version didn't succeed - call stub instead.
   push(lr);
-  DoubleToIStub stub(object,
+  DoubleToIStub stub(isolate(),
+                     object,
                      result,
                      HeapNumber::kValueOffset - kHeapObjectTag,
                      true,
@@ -2657,10 +2642,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // If the expected number of arguments of the runtime function is
   // constant, we check that the actual number of arguments match the
   // expectation.
-  if (f->nargs >= 0 && f->nargs != num_arguments) {
-    IllegalOperation(num_arguments);
-    return;
-  }
+  CHECK(f->nargs < 0 || f->nargs == num_arguments);
 
   // TODO(1236192): Most runtime routines don't need the number of
   // arguments passed in because it is constant. At some point we
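The hunk above also tightens the arity contract: a mismatch between a fixed-arity runtime function and the emitted argument count is now a hard CHECK failure at code-assembly time, rather than being papered over by the removed IllegalOperation path. A hedged standalone restatement in plain C++ (illustrative names, not v8 API):

    #include <cassert>

    // declared_nargs < 0 marks a variadic runtime function.
    void CheckRuntimeArity(int declared_nargs, int num_arguments) {
      assert(declared_nargs < 0 || declared_nargs == num_arguments);
    }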
@@ -2668,7 +2650,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f,
   // smarter.
   mov(r0, Operand(num_arguments));
   mov(r1, Operand(ExternalReference(f, isolate())));
-  CEntryStub stub(1, save_doubles);
+  CEntryStub stub(isolate(), 1, save_doubles);
   CallStub(&stub);
 }
@@ -2678,7 +2660,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
   mov(r0, Operand(num_arguments));
   mov(r1, Operand(ext));
 
-  CEntryStub stub(1);
+  CEntryStub stub(isolate(), 1);
   CallStub(&stub);
 }
@@ -2710,8 +2692,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
   ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
 #endif
   mov(r1, Operand(builtin));
-  CEntryStub stub(1);
-  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+  CEntryStub stub(isolate(), 1);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 }
@@ -3794,36 +3776,19 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                         DwVfpRegister input_reg,
                                         LowDwVfpRegister double_scratch) {
-  Label above_zero;
   Label done;
-  Label in_bounds;
-
-  VFPCompareAndSetFlags(input_reg, 0.0);
-  b(gt, &above_zero);
-
-  // Double value is less than zero, NaN or Inf, return 0.
-  mov(result_reg, Operand::Zero());
-  b(al, &done);
-
-  // Double value is >= 255, return 255.
-  bind(&above_zero);
+
+  // Handle inputs >= 255 (including +infinity).
   Vmov(double_scratch, 255.0, result_reg);
-  VFPCompareAndSetFlags(input_reg, double_scratch);
-  b(le, &in_bounds);
   mov(result_reg, Operand(255));
-  b(al, &done);
-
-  // In 0-255 range, round and truncate.
-  bind(&in_bounds);
-  // Save FPSCR.
-  vmrs(ip);
-  // Set rounding mode to round to the nearest integer by clearing bits[23:22].
-  bic(result_reg, ip, Operand(kVFPRoundingModeMask));
-  vmsr(result_reg);
-  vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
+  VFPCompareAndSetFlags(input_reg, double_scratch);
+  b(ge, &done);
+
+  // For inputs < 255 (including negative) vcvt_u32_f64 with round-to-nearest
+  // rounding mode will provide the correct result.
+  vcvt_u32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
   vmov(result_reg, double_scratch.low());
-  // Restore FPSCR.
-  vmsr(ip);
+
   bind(&done);
 }
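The rewritten ClampDoubleToUint8 above leans on two facts: a single compare against 255.0 catches large values and +infinity, and an unsigned round-to-nearest conversion saturates negatives and NaN to zero. A minimal host-side C++ model of those semantics (the function name and use of <cfenv> are illustrative, not v8 API):

    #include <cfenv>
    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8Model(double input) {
      if (input >= 255.0) return 255;   // also catches +infinity
      std::fesetround(FE_TONEAREST);    // vcvt_u32_f64 here rounds to nearest, ties to even
      double rounded = std::nearbyint(input);
      if (!(rounded > 0.0)) return 0;   // negatives and NaN saturate to 0
      return static_cast<uint8_t>(rounded);
    }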

46
deps/v8/src/arm/macro-assembler-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
@@ -102,7 +79,11 @@ class MacroAssembler: public Assembler {
   static int CallSize(Register target, Condition cond = al);
   void Call(Register target, Condition cond = al);
   int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
-  static int CallSizeNotPredictableCodeSize(Address target,
+  int CallStubSize(CodeStub* stub,
+                   TypeFeedbackId ast_id = TypeFeedbackId::None(),
+                   Condition cond = al);
+  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
+                                            Address target,
                                             RelocInfo::Mode rmode,
                                             Condition cond = al);
   void Call(Address target, RelocInfo::Mode rmode,
@@ -627,12 +608,10 @@ class MacroAssembler: public Assembler {
                                Register scratch,
                                Label* fail);
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
 
   void DebugBreak();
-#endif
 
   // ---------------------------------------------------------------------------
   // Exception handling
@@ -951,10 +930,6 @@ class MacroAssembler: public Assembler {
   }
 
-  // Generates code for reporting that an illegal operation has
-  // occurred.
-  void IllegalOperation(int num_arguments);
-
   // Picks out an array index from the hash field.
   // Register use:
   //   hash - holds the index's hash. Clobbered.
@@ -1524,11 +1499,12 @@ class FrameAndConstantPoolScope {
         type_(type),
         old_has_frame_(masm->has_frame()),
         old_constant_pool_available_(masm->is_constant_pool_available())  {
+    // We only want to enable constant pool access for non-manual frame scopes
+    // to ensure the constant pool pointer is valid throughout the scope.
+    ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
     masm->set_has_frame(true);
     masm->set_constant_pool_available(true);
-    if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
-      masm->EnterFrame(type, !old_constant_pool_available_);
-    }
+    masm->EnterFrame(type, !old_constant_pool_available_);
   }
 
   ~FrameAndConstantPoolScope() {

31
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -1043,7 +1020,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
   ExternalReference stack_guard_check =
       ExternalReference::re_check_stack_guard_state(isolate());
   __ mov(ip, Operand(stack_guard_check));
-  DirectCEntryStub stub;
+  DirectCEntryStub stub(isolate());
   stub.GenerateCall(masm_, ip);
 
   // Drop the return address from the stack.
@@ -1094,7 +1071,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
   ASSERT(*return_address <=
          re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+  Object* result = Execution::HandleStackGuardInterrupt(isolate);
 
   if (*code_handle != re_code) {  // Return address no longer valid
     int delta = code_handle->address() - re_code->address();

27
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
 #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_

33
deps/v8/src/arm/simulator-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include <stdarg.h>
 #include <stdlib.h>
@@ -773,8 +750,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   z_flag_FPSCR_ = false;
   c_flag_FPSCR_ = false;
   v_flag_FPSCR_ = false;
-  FPSCR_rounding_mode_ = RZ;
-  FPSCR_default_NaN_mode_ = true;
+  FPSCR_rounding_mode_ = RN;
+  FPSCR_default_NaN_mode_ = false;
 
   inv_op_vfp_flag_ = false;
   div_zero_vfp_flag_ = false;
@@ -2936,7 +2913,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
   } else if ((instr->Opc2Value() == 0xA) && (instr->Opc3Value() == 0x3) &&
              (instr->Bit(8) == 1)) {
     // vcvt.f64.s32 Dd, Dd, #<fbits>
-    int fraction_bits = 32 - ((instr->Bit(5) << 4) | instr->Bits(3, 0));
+    int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
     int fixed_value = get_sinteger_from_s_register(vd * 2);
     double divide = 1 << fraction_bits;
     set_d_register_from_double(vd, fixed_value / divide);
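The fix above corrects the bit order of the fixed-point <fbits> field: the immediate is imm4:i, i.e. bits 3:0 shifted left once with bit 5 as the low bit, and fbits is 32 minus that value. A small hedged decode helper making the arithmetic concrete (standalone sketch, not simulator code):

    // e.g. imm4 = 0xF, i = 1 encodes imm = 31, so fbits = 32 - 31 = 1.
    int DecodeVcvtFractionBits(int imm4, int i) {
      return 32 - ((imm4 << 1) | i);
    }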

27
deps/v8/src/arm/simulator-arm.h

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 // Declares a Simulator for ARM instructions if we are not generating a native

92
deps/v8/src/arm/stub-cache-arm.cc

@@ -1,29 +1,6 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"
@@ -431,6 +408,22 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
     __ JumpIfNotSmi(value_reg, miss_label);
   } else if (representation.IsHeapObject()) {
     __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        __ CompareMap(scratch1, it.Current(), &do_store);
+        it.Advance();
+        if (it.Done()) {
+          __ b(ne, miss_label);
+          break;
+        }
+        __ b(eq, &do_store);
+      }
+      __ bind(&do_store);
+    }
   } else if (representation.IsDouble()) {
     Label do_store, heap_number;
     __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
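The loop added above (and repeated in GenerateStoreField below) has a slightly unusual shape: every candidate map except the last branches to do_store on a match, and only the final CompareMap falls through to a branch-if-not-equal to the miss label. A hedged control-flow model in plain C++ (stand-in types, not v8 API):

    // Returns true when the value's map matches any allowed class
    // (the __ b(eq, &do_store) case); false models __ b(ne, miss_label).
    bool FieldTypeMapCheckModel(const void* value_map,
                                const void* const* candidate_maps, int count) {
      for (int i = 0; i < count; ++i) {
        if (value_map == candidate_maps[i]) return true;
      }
      return false;
    }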
@@ -593,6 +586,22 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
     __ JumpIfNotSmi(value_reg, miss_label);
   } else if (representation.IsHeapObject()) {
     __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = lookup->GetFieldType();
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        __ CompareMap(scratch1, it.Current(), &do_store);
+        it.Advance();
+        if (it.Done()) {
+          __ b(ne, miss_label);
+          break;
+        }
+        __ b(eq, &do_store);
+      }
+      __ bind(&do_store);
+    }
   } else if (representation.IsDouble()) {
     // Load the double storage.
     if (index < 0) {
@@ -801,7 +810,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
   __ mov(api_function_address, Operand(ref));
 
   // Jump to stub.
-  CallApiFunctionStub stub(is_store, call_data_undefined, argc);
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
   __ TailCallStub(&stub);
 }
@@ -836,7 +845,9 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
   int depth = 0;
 
   Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant());
+  if (type->IsConstant()) {
+    current = Handle<JSObject>::cast(type->AsConstant()->Value());
+  }
   Handle<JSObject> prototype = Handle<JSObject>::null();
   Handle<Map> current_map = receiver_map;
   Handle<Map> holder_map(holder->map());
@@ -859,7 +870,7 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
         name = factory()->InternalizeString(Handle<String>::cast(name));
       }
       ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(*name) ==
+             current->property_dictionary()->FindEntry(name) ==
              NameDictionary::kNotFound);
 
       GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
@@ -999,15 +1010,17 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
                                          Representation representation) {
   if (!reg.is(receiver())) __ mov(receiver(), reg);
   if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(field.is_inobject(holder),
+    LoadFieldStub stub(isolate(),
+                       field.is_inobject(holder),
                        field.translate(holder),
                        representation);
-    GenerateTailCall(masm(), stub.GetCode(isolate()));
+    GenerateTailCall(masm(), stub.GetCode());
   } else {
-    KeyedLoadFieldStub stub(field.is_inobject(holder),
+    KeyedLoadFieldStub stub(isolate(),
+                            field.is_inobject(holder),
                             field.translate(holder),
                             representation);
-    GenerateTailCall(masm(), stub.GetCode(isolate()));
+    GenerateTailCall(masm(), stub.GetCode());
   }
 }
@@ -1061,7 +1074,7 @@ void LoadStubCompiler::GenerateLoadCallback(
   ExternalReference ref = ExternalReference(&fun, type, isolate());
   __ mov(getter_address_reg, Operand(ref));
 
-  CallApiGetterStub stub;
+  CallApiGetterStub stub(isolate());
   __ TailCallStub(&stub);
 }
@@ -1154,19 +1167,6 @@ void LoadStubCompiler::GenerateLoadInterceptor(
 }
 
 
-void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) {
-  Label success;
-  // Check that the object is a boolean.
-  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-  __ cmp(object, ip);
-  __ b(eq, &success);
-  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-  __ cmp(object, ip);
-  __ b(ne, miss);
-  __ bind(&success);
-}
-
-
 Handle<Code> StoreStubCompiler::CompileStoreCallback(
     Handle<JSObject> object,
     Handle<JSObject> holder,

49
deps/v8/src/arm64/assembler-arm64-inl.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
 #define V8_ARM64_ASSEMBLER_ARM64_INL_H_
@@ -434,6 +411,12 @@ Operand Operand::UntagSmiAndScale(Register smi, int scale) {
 }
 
 
+MemOperand::MemOperand()
+  : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
+    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+}
+
+
 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
   : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
     shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
@@ -738,7 +721,7 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
 }
 
 
-static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
+static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
 static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
@@ -750,7 +733,6 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(!Code::IsYoungSequence(pc_));
   // Read the stub entry point from the code age sequence.
   Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
   return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
@@ -759,7 +741,7 @@ Code* RelocInfo::code_age_stub() {
 void RelocInfo::set_code_age_stub(Code* stub) {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(!Code::IsYoungSequence(pc_));
+  ASSERT(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
   // Overwrite the stub entry point in the code age sequence. This is loaded as
   // a literal so there is no need to call FlushICache here.
   Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
@@ -825,14 +807,12 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
     visitor->VisitCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
              isolate->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
-#endif
   } else if (RelocInfo::IsRuntimeEntry(mode)) {
     visitor->VisitRuntimeEntry(this);
   }
@@ -850,14 +830,12 @@ void RelocInfo::Visit(Heap* heap) {
     StaticVisitor::VisitCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
     StaticVisitor::VisitDebugTarget(heap, this);
-#endif
   } else if (RelocInfo::IsRuntimeEntry(mode)) {
     StaticVisitor::VisitRuntimeEntry(this);
   }
@@ -1199,11 +1177,16 @@ void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
 }
 
 
-inline void Assembler::CheckBuffer() {
+inline void Assembler::CheckBufferSpace() {
   ASSERT(pc_ < (buffer_ + buffer_size_));
   if (buffer_space() < kGap) {
     GrowBuffer();
   }
+}
+
+
+inline void Assembler::CheckBuffer() {
+  CheckBufferSpace();
   if (pc_offset() >= next_veneer_pool_check_) {
     CheckVeneerPool(false, true);
   }
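A hedged reading of the split above: EnsureSpace-style callers need the buffer-growth check in isolation, without any chance of triggering veneer-pool emission, while ordinary instruction emission keeps both checks. A sketch with stand-in bodies (names mirror the diff; the logic is a model, not the real implementation):

    struct AssemblerSketch {
      int buffer_space = 32;
      int pc_offset = 0;
      int next_veneer_pool_check = 1024;

      void GrowBuffer() { buffer_space += 4096; }
      void CheckVeneerPool() { next_veneer_pool_check = 1 << 30; }

      void CheckBufferSpace() {  // safe even while emitting a pool
        if (buffer_space < 16) GrowBuffer();
      }
      void CheckBuffer() {       // used before ordinary instructions
        CheckBufferSpace();
        if (pc_offset >= next_veneer_pool_check) CheckVeneerPool();
      }
    };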

178
deps/v8/src/arm64/assembler-arm64.cc

@@ -271,14 +271,9 @@ void Operand::initialize_handle(Handle<Object> handle) {
 }
 
 
-bool Operand::NeedsRelocation() const {
+bool Operand::NeedsRelocation(Isolate* isolate) const {
   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-    if (!Serializer::enabled()) {
-      Serializer::TooLateToEnableNow();
-    }
-#endif
-    return Serializer::enabled();
+    return Serializer::enabled(isolate);
   }
 
   return !RelocInfo::IsNone(rmode_);
@@ -456,6 +451,8 @@ void Assembler::bind(Label* label) {
   ASSERT(!label->is_near_linked());
   ASSERT(!label->is_bound());
 
+  DeleteUnresolvedBranchInfoForLabel(label);
+
   // If the label is linked, the link chain looks something like this:
   //
   //      |--I----I-------I-------L
@@ -497,8 +494,6 @@ void Assembler::bind(Label* label) {
   ASSERT(label->is_bound());
   ASSERT(!label->is_linked());
-
-  DeleteUnresolvedBranchInfoForLabel(label);
 }
@@ -545,21 +540,50 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
 }
 
 
+void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
+  ASSERT(label->is_linked());
+  CheckLabelLinkChain(label);
+
+  int link_offset = label->pos();
+  int link_pcoffset;
+  bool end_of_chain = false;
+
+  while (!end_of_chain) {
+    Instruction * link = InstructionAt(link_offset);
+    link_pcoffset = link->ImmPCOffset();
+
+    // ADR instructions are not handled by veneers.
+    if (link->IsImmBranch()) {
+      int max_reachable_pc = InstructionOffset(link) +
+          Instruction::ImmBranchRange(link->BranchType());
+      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
+      std::pair<unresolved_info_it, unresolved_info_it> range;
+      range = unresolved_branches_.equal_range(max_reachable_pc);
+      unresolved_info_it it;
+      for (it = range.first; it != range.second; ++it) {
+        if (it->second.pc_offset_ == link_offset) {
+          unresolved_branches_.erase(it);
+          break;
+        }
+      }
+    }
+
+    end_of_chain = (link_pcoffset == 0);
+    link_offset = link_offset + link_pcoffset;
+  }
+}
+
+
 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
   if (unresolved_branches_.empty()) {
     ASSERT(next_veneer_pool_check_ == kMaxInt);
     return;
   }
 
-  // Branches to this label will be resolved when the label is bound below.
-  std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
-  it = unresolved_branches_.begin();
-  while (it != unresolved_branches_.end()) {
-    it_tmp = it++;
-    if (it_tmp->second.label_ == label) {
-      CHECK(it_tmp->first >= pc_offset());
-      unresolved_branches_.erase(it_tmp);
-    }
+  if (label->is_linked()) {
+    // Branches to this label will be resolved when the label is bound, normally
+    // just after all the associated info has been deleted.
+    DeleteUnresolvedBranchInfoForLabelTraverse(label);
   }
-
   if (unresolved_branches_.empty()) {
     next_veneer_pool_check_ = kMaxInt;
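The new traversal above walks the label's own link chain instead of scanning the whole unresolved_branches_ multimap: each linked instruction stores the PC offset of the next link in its immediate field, and an offset of zero terminates the chain. A hedged standalone model of the walk (a std::map stands in for the instruction stream):

    #include <map>
    #include <vector>

    std::vector<int> WalkLabelChain(const std::map<int, int>& imm_pc_offset,
                                    int start_pos) {
      std::vector<int> visited;
      int pos = start_pos;
      bool end_of_chain = false;
      while (!end_of_chain) {
        visited.push_back(pos);
        int delta = imm_pc_offset.at(pos);  // link->ImmPCOffset()
        end_of_chain = (delta == 0);
        pos += delta;
      }
      return visited;
    }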
@@ -645,7 +669,7 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
 void Assembler::ConstantPoolMarker(uint32_t size) {
   ASSERT(is_const_pool_blocked());
   // + 1 is for the crash guard.
-  Emit(LDR_x_lit | ImmLLiteral(2 * size + 1) | Rt(xzr));
+  Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
 }
@@ -1658,6 +1682,13 @@ void Assembler::frinta(const FPRegister& fd,
 }
 
 
+void Assembler::frintm(const FPRegister& fd,
+                       const FPRegister& fn) {
+  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  FPDataProcessing1Source(fd, fn, FRINTM);
+}
+
+
 void Assembler::frintn(const FPRegister& fd,
                        const FPRegister& fn) {
   ASSERT(fd.SizeInBits() == fn.SizeInBits());
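For reference, the new frintm rounds to an integral value toward minus infinity, which for finite doubles is exactly std::floor. A hedged host-side model:

    #include <cmath>

    double FrintmModel(double fn) { return std::floor(fn); }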
@@ -1872,7 +1903,7 @@ void Assembler::AddSub(const Register& rd,
                        FlagsUpdate S,
                        AddSubOp op) {
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(!operand.NeedsRelocation());
+  ASSERT(!operand.NeedsRelocation(isolate()));
   if (operand.IsImmediate()) {
     int64_t immediate = operand.immediate();
     ASSERT(IsImmAddSub(immediate));
@@ -1912,7 +1943,7 @@ void Assembler::AddSubWithCarry(const Register& rd,
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
   ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
   ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
-  ASSERT(!operand.NeedsRelocation());
+  ASSERT(!operand.NeedsRelocation(isolate()));
   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
 }
@@ -1933,10 +1964,7 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
 #ifdef USE_SIMULATOR
   // Don't generate simulator specific code if we are building a snapshot, which
   // might be run on real hardware.
-  if (!Serializer::enabled()) {
-#ifdef DEBUG
-    Serializer::TooLateToEnableNow();
-#endif
+  if (!Serializer::enabled(isolate())) {
     // The arguments to the debug marker need to be contiguous in memory, so
     // make sure we don't try to emit pools.
     BlockPoolsScope scope(this);
@@ -1971,7 +1999,7 @@ void Assembler::Logical(const Register& rd,
                         const Operand& operand,
                         LogicalOp op) {
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(!operand.NeedsRelocation());
+  ASSERT(!operand.NeedsRelocation(isolate()));
   if (operand.IsImmediate()) {
     int64_t immediate = operand.immediate();
     unsigned reg_size = rd.SizeInBits();
@@ -2023,7 +2051,7 @@ void Assembler::ConditionalCompare(const Register& rn,
                                    Condition cond,
                                    ConditionalCompareOp op) {
   Instr ccmpop;
-  ASSERT(!operand.NeedsRelocation());
+  ASSERT(!operand.NeedsRelocation(isolate()));
   if (operand.IsImmediate()) {
     int64_t immediate = operand.immediate();
     ASSERT(IsImmConditionalCompare(immediate));
@@ -2138,7 +2166,7 @@ void Assembler::DataProcShiftedRegister(const Register& rd,
                                         Instr op) {
   ASSERT(operand.IsShiftedRegister());
   ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
-  ASSERT(!operand.NeedsRelocation());
+  ASSERT(!operand.NeedsRelocation(isolate()));
   Emit(SF(rd) | op | Flags(S) |
        ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
        Rm(operand.reg()) | Rn(rn) | Rd(rd));
@@ -2150,7 +2178,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          Instr op) {
-  ASSERT(!operand.NeedsRelocation());
+  ASSERT(!operand.NeedsRelocation(isolate()));
   Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
        ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
@@ -2489,12 +2517,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   if (!RelocInfo::IsNone(rmode)) {
     // Don't record external references unless the heap will be serialized.
     if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-      if (!Serializer::enabled()) {
-        Serializer::TooLateToEnableNow();
-      }
-#endif
-      if (!Serializer::enabled() && !emit_debug_code()) {
+      if (!Serializer::enabled(isolate()) && !emit_debug_code()) {
         return;
       }
     }
@@ -2581,7 +2604,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   {
     // Block recursive calls to CheckConstPool and protect from veneer pools.
     BlockPoolsScope block_pools(this);
-    RecordComment("[ Constant Pool");
     RecordConstPool(pool_size);
 
     // Emit jump over constant pool if necessary.
@@ -2601,6 +2623,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
     // beginning of the constant pool.
     // TODO(all): currently each relocated constant is 64 bits, consider adding
     // support for 32-bit entries.
+    RecordComment("[ Constant Pool");
     ConstantPoolMarker(2 * num_pending_reloc_info_);
     ConstantPoolGuard();
@@ -2650,12 +2673,10 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
 void Assembler::RecordVeneerPool(int location_offset, int size) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
   RelocInfo rinfo(buffer_ + location_offset,
                   RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
                   NULL);
   reloc_info_writer.Write(&rinfo);
-#endif
 }
@@ -2789,22 +2810,91 @@ void Assembler::RecordDebugBreakSlot() {
 void Assembler::RecordConstPool(int size) {
   // We only need this for debugger support, to correctly compute offsets in the
   // code.
-#ifdef ENABLE_DEBUGGER_SUPPORT
   RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
-#endif
 }
 
 
-MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
   // No out-of-line constant pool support.
-  UNREACHABLE();
-  return NULL;
+  ASSERT(!FLAG_enable_ool_constant_pool);
+  return isolate->factory()->empty_constant_pool_array();
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   // No out-of-line constant pool support.
-  UNREACHABLE();
+  ASSERT(!FLAG_enable_ool_constant_pool);
+  return;
 }
+
+
+void PatchingAssembler::MovInt64(const Register& rd, int64_t imm) {
+  Label start;
+  bind(&start);
+
+  ASSERT(rd.Is64Bits());
+  ASSERT(!rd.IsSP());
+
+  for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
+    uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
+    movk(rd, imm16, 16 * i);
+  }
+
+  ASSERT(SizeOfCodeGeneratedSince(&start) ==
+         kMovInt64NInstrs * kInstructionSize);
+}
+
+
+void PatchingAssembler::PatchAdrFar(Instruction* target) {
+  // The code at the current instruction should be:
+  //   adr  rd, 0
+  //   nop  (adr_far)
+  //   nop  (adr_far)
+  //   nop  (adr_far)
+  //   movz scratch, 0
+  //   add  rd, rd, scratch
+
+  // Verify the expected code.
+  Instruction* expected_adr = InstructionAt(0);
+  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
+  int rd_code = expected_adr->Rd();
+  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
+    CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
+  }
+  Instruction* expected_movz =
+      InstructionAt((kAdrFarPatchableNInstrs - 2) * kInstructionSize);
+  CHECK(expected_movz->IsMovz() &&
+        (expected_movz->ImmMoveWide() == 0) &&
+        (expected_movz->ShiftMoveWide() == 0));
+  int scratch_code = expected_movz->Rd();
+  Instruction* expected_add =
+      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
+  CHECK(expected_add->IsAddSubShifted() &&
+        (expected_add->Mask(AddSubOpMask) == ADD) &&
+        expected_add->SixtyFourBits() &&
+        (expected_add->Rd() == rd_code) && (expected_add->Rn() == rd_code) &&
+        (expected_add->Rm() == scratch_code) &&
+        (static_cast<Shift>(expected_add->ShiftDP()) == LSL) &&
+        (expected_add->ImmDPShift() == 0));
+
+  // Patch to load the correct address.
+  Label start;
+  bind(&start);
+  Register rd = Register::XRegFromCode(rd_code);
+  // If the target is in range, we only patch the adr. Otherwise we patch the
+  // nops with fixup instructions.
+  int target_offset = expected_adr->DistanceTo(target);
+  if (Instruction::IsValidPCRelOffset(target_offset)) {
+    adr(rd, target_offset);
+    for (int i = 0; i < kAdrFarPatchableNInstrs - 2; ++i) {
+      nop(ADR_FAR_NOP);
+    }
+  } else {
+    Register scratch = Register::XRegFromCode(scratch_code);
+    adr(rd, 0);
+    MovInt64(scratch, target_offset);
+    add(rd, rd, scratch);
+  }
+}
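The near/far decision in PatchAdrFar above hinges on adr's reach: a 21-bit signed PC-relative immediate, roughly +/-1MB. A hedged model of the range check that Instruction::IsValidPCRelOffset presumably performs; anything wider takes the movz/movk fixup path:

    #include <cstdint>

    bool IsValidPCRelOffsetModel(int64_t offset) {
      return offset >= -(INT64_C(1) << 20) && offset < (INT64_C(1) << 20);
    }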

91
deps/v8/src/arm64/assembler-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_
 #define V8_ARM64_ASSEMBLER_ARM64_H_
@@ -285,9 +262,9 @@ struct FPRegister : public CPURegister {
   static const unsigned kAllocatableLowRangeBegin = 0;
   static const unsigned kAllocatableLowRangeEnd = 14;
   static const unsigned kAllocatableHighRangeBegin = 16;
-  static const unsigned kAllocatableHighRangeEnd = 29;
-  static const RegList kAllocatableFPRegisters = 0x3fff7fff;
+  static const unsigned kAllocatableHighRangeEnd = 28;
+  static const RegList kAllocatableFPRegisters = 0x1fff7fff;
 
   // Gap between low and high ranges.
   static const int kAllocatableRangeGapSize =
@@ -316,12 +293,12 @@ struct FPRegister : public CPURegister {
     ASSERT((kAllocatableLowRangeBegin == 0) &&
            (kAllocatableLowRangeEnd == 14) &&
            (kAllocatableHighRangeBegin == 16) &&
-           (kAllocatableHighRangeEnd == 29));
+           (kAllocatableHighRangeEnd == 28));
     const char* const names[] = {
       "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
       "d8", "d9", "d10", "d11", "d12", "d13", "d14",
       "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
-      "d24", "d25", "d26", "d27", "d28", "d29"
+      "d24", "d25", "d26", "d27", "d28"
     };
     return names[index];
   }
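A hedged consistency check for the new constants: the mask 0x1fff7fff covers exactly d0-d14 and d16-d28, leaving d15 as fp_zero and d29-d31 as scratch registers (see the alias changes below). Helper names here are illustrative:

    #include <cstdint>

    constexpr uint32_t BitRange(unsigned lo, unsigned hi) {
      return ((UINT32_C(1) << (hi - lo + 1)) - 1) << lo;
    }
    static_assert((BitRange(0, 14) | BitRange(16, 28)) == UINT32_C(0x1fff7fff),
                  "kAllocatableFPRegisters covers d0-d14 and d16-d28");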
@@ -420,9 +397,11 @@ ALIAS_REGISTER(Register, wzr, w31);
 // Keeps the 0 double value.
 ALIAS_REGISTER(FPRegister, fp_zero, d15);
 // Crankshaft double scratch register.
-ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d30);
-// MacroAssembler double scratch register.
-ALIAS_REGISTER(FPRegister, fp_scratch, d31);
+ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
+// MacroAssembler double scratch registers.
+ALIAS_REGISTER(FPRegister, fp_scratch, d30);
+ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
+ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
 
 #undef ALIAS_REGISTER
@@ -514,8 +493,8 @@ class CPURegList {
   void Combine(const CPURegList& other);
 
   // Remove every register in the other CPURegList from this one. Registers that
-  // do not exist in this list are ignored. The type and size of the registers
-  // in the 'other' list must match those in this list.
+  // do not exist in this list are ignored. The type of the registers in the
+  // 'other' list must match those in this list.
   void Remove(const CPURegList& other);
 
   // Variants of Combine and Remove which take CPURegisters.
@@ -670,7 +649,7 @@ class Operand {
   // Relocation information.
   RelocInfo::Mode rmode() const { return rmode_; }
   void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
-  bool NeedsRelocation() const;
+  bool NeedsRelocation(Isolate* isolate) const;
 
   // Helpers
   inline static Operand UntagSmi(Register smi);
@@ -690,6 +669,7 @@ class Operand {
 // MemOperand represents a memory operand in a load or store instruction.
 class MemOperand {
  public:
+  inline explicit MemOperand();
   inline explicit MemOperand(Register base,
                              ptrdiff_t offset = 0,
                              AddrMode addrmode = Offset);
@@ -1499,8 +1479,9 @@ class Assembler : public AssemblerBase {
   enum NopMarkerTypes {
     DEBUG_BREAK_NOP,
     INTERRUPT_CODE_NOP,
+    ADR_FAR_NOP,
     FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
-    LAST_NOP_MARKER = INTERRUPT_CODE_NOP
+    LAST_NOP_MARKER = ADR_FAR_NOP
   };
 
   void nop(NopMarkerTypes n) {
@@ -1582,6 +1563,9 @@ class Assembler : public AssemblerBase {
   // FP round to integer (nearest with ties to away).
   void frinta(const FPRegister& fd, const FPRegister& fn);
 
+  // FP round to integer (toward minus infinity).
+  void frintm(const FPRegister& fd, const FPRegister& fn);
+
   // FP round to integer (nearest with ties to even).
   void frintn(const FPRegister& fd, const FPRegister& fn);
@@ -1688,6 +1672,10 @@ class Assembler : public AssemblerBase {
     return reinterpret_cast<Instruction*>(buffer_ + offset);
   }
 
+  ptrdiff_t InstructionOffset(Instruction* instr) const {
+    return reinterpret_cast<byte*>(instr) - buffer_;
+  }
+
   // Register encoding.
   static Instr Rd(CPURegister rd) {
     ASSERT(rd.code() != kSPRegInternalCode);
@@ -1761,6 +1749,13 @@ class Assembler : public AssemblerBase {
   inline static Instr ImmCondCmp(unsigned imm);
   inline static Instr Nzcv(StatusFlags nzcv);
 
+  static bool IsImmAddSub(int64_t immediate);
+  static bool IsImmLogical(uint64_t value,
+                           unsigned width,
+                           unsigned* n,
+                           unsigned* imm_s,
+                           unsigned* imm_r);
+
   // MemOperand offset encoding.
   inline static Instr ImmLSUnsigned(int imm12);
   inline static Instr ImmLS(int imm9);
@@ -1805,7 +1800,7 @@ class Assembler : public AssemblerBase {
   void CheckConstPool(bool force_emit, bool require_jump);
 
   // Allocate a constant pool of the correct size for the generated code.
-  MaybeObject* AllocateConstantPool(Heap* heap);
+  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
 
   // Generate the constant pool for the generated code.
   void PopulateConstantPool(ConstantPoolArray* constant_pool);
@ -1874,11 +1869,6 @@ class Assembler : public AssemblerBase {
unsigned imm_s, unsigned imm_s,
unsigned imm_r, unsigned imm_r,
LogicalOp op); LogicalOp op);
static bool IsImmLogical(uint64_t value,
unsigned width,
unsigned* n,
unsigned* imm_s,
unsigned* imm_r);
void ConditionalCompare(const Register& rn, void ConditionalCompare(const Register& rn,
const Operand& operand, const Operand& operand,
@ -1909,7 +1899,6 @@ class Assembler : public AssemblerBase {
const Operand& operand, const Operand& operand,
FlagsUpdate S, FlagsUpdate S,
AddSubOp op); AddSubOp op);
static bool IsImmAddSub(int64_t immediate);
static bool IsImmFP32(float imm); static bool IsImmFP32(float imm);
static bool IsImmFP64(double imm); static bool IsImmFP64(double imm);
@ -2034,6 +2023,7 @@ class Assembler : public AssemblerBase {
} }
void GrowBuffer(); void GrowBuffer();
void CheckBufferSpace();
void CheckBuffer(); void CheckBuffer();
// Pc offset of the next constant pool check. // Pc offset of the next constant pool check.
@ -2176,6 +2166,11 @@ class Assembler : public AssemblerBase {
// not later attempt (likely unsuccessfully) to patch it to branch directly to // not later attempt (likely unsuccessfully) to patch it to branch directly to
// the label. // the label.
void DeleteUnresolvedBranchInfoForLabel(Label* label); void DeleteUnresolvedBranchInfoForLabel(Label* label);
// This function deletes the information related to the label by traversing
// the label chain, and for each PC-relative instruction in the chain checking
// if pending unresolved information exists. Its complexity is proportional to
// the length of the label chain.
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
private: private:
PositionsRecorder positions_recorder_; PositionsRecorder positions_recorder_;
@ -2218,13 +2213,21 @@ class PatchingAssembler : public Assembler {
size_t length = buffer_size_ - kGap; size_t length = buffer_size_ - kGap;
CPU::FlushICache(buffer_, length); CPU::FlushICache(buffer_, length);
} }
static const int kMovInt64NInstrs = 4;
void MovInt64(const Register& rd, int64_t imm);
// See definition of PatchAdrFar() for details.
static const int kAdrFarPatchableNNops = kMovInt64NInstrs - 1;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 3;
void PatchAdrFar(Instruction* target);
}; };
class EnsureSpace BASE_EMBEDDED { class EnsureSpace BASE_EMBEDDED {
public: public:
explicit EnsureSpace(Assembler* assembler) { explicit EnsureSpace(Assembler* assembler) {
assembler->CheckBuffer(); assembler->CheckBufferSpace();
} }
}; };
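IsImmAddSub and IsImmLogical move from the private section into the public interface here. For add/sub, the AArch64 encoding rule the predicate gates is small: the immediate must fit in 12 bits, optionally shifted left by 12. A standalone sketch of that test (my reconstruction of the architectural rule, not V8's implementation):

#include <cstdint>

// AArch64 ADD/SUB (immediate) accepts a 12-bit unsigned immediate,
// optionally left-shifted by 12 -- i.e. imm12 or imm12 << 12.
bool IsImmAddSubSketch(int64_t immediate) {
  const int64_t kImm12Mask = 0xfff;
  return (immediate >= 0) &&
         (((immediate & ~kImm12Mask) == 0) ||         // fits in imm12
          ((immediate & ~(kImm12Mask << 12)) == 0));  // fits in imm12 << 12
}

// Examples: 0xfff and 0xfff000 encode directly; 0x1001 does not, and
// negative values are handled upstream by flipping ADD <-> SUB.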

67
deps/v8/src/arm64/builtins-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #include "v8.h"
@@ -370,13 +347,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   Label rt_call, allocated;
   if (FLAG_inline_new) {
     Label undo_allocation;
-#if ENABLE_DEBUGGER_SUPPORT
     ExternalReference debug_step_in_fp =
         ExternalReference::debug_step_in_fp_address(isolate);
     __ Mov(x2, Operand(debug_step_in_fp));
     __ Ldr(x2, MemOperand(x2));
     __ Cbnz(x2, &rt_call);
-#endif
     // Load the initial map and verify that it is in fact a map.
     Register init_map = x2;
     __ Ldr(init_map,
@@ -785,7 +760,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     // No type feedback cell is available.
     __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
-    CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+    CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
     __ CallStub(&stub);
   } else {
     ParameterCount actual(x0);
@@ -912,7 +887,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   }
   // Jump to point after the code-age stub.
-  __ Add(x0, x0, kCodeAgeSequenceSize);
+  __ Add(x0, x0, kNoCodeAgeSequenceLength);
   __ Br(x0);
 }
@@ -1280,7 +1255,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // There is not enough stack space, so use a builtin to throw an appropriate
   // error.
   __ Push(function, argc);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
   // We should never return from the APPLY_OVERFLOW builtin.
   if (__ emit_debug_code()) {
     __ Unreachable();
@@ -1400,6 +1375,27 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 }
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- x0 : actual number of arguments
+  //  -- x1 : function (passed through to callee)
+  //  -- x2 : expected number of arguments
+  // -----------------------------------
+  // Check the stack for overflow.
+  // We are not trying to catch interruptions (e.g. debug break and
+  // preemption) here, so the "real stack limit" is checked.
+  Label enough_stack_space;
+  __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+  // Make x10 the space we have left. The stack might already be overflowed
+  // here which will cause x10 to become negative.
+  __ Sub(x10, jssp, x10);
+  // Check if the arguments will overflow the stack.
+  __ Cmp(x10, Operand(x2, LSL, kPointerSizeLog2));
+  __ B(le, stack_overflow);
+}
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(x10, x0);
   __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
@@ -1433,6 +1429,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   //  -- x2 : expected number of arguments
   // -----------------------------------
+  Label stack_overflow;
+  ArgumentAdaptorStackCheck(masm, &stack_overflow);
   Register argc_actual = x0;  // Excluding the receiver.
   Register argc_expected = x2;  // Excluding the receiver.
   Register function = x1;
@@ -1552,6 +1551,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Call the entry point without adapting the arguments.
   __ Bind(&dont_adapt_arguments);
   __ Jump(code_entry);
+  __ Bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    EnterArgumentsAdaptorFrame(masm);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    __ Unreachable();
+  }
 }
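The stack check added above boils down to pointer arithmetic: subtract the real stack limit from the current stack pointer and compare the bytes left against what the expected arguments will consume. A minimal standalone sketch of that arithmetic (plain C++, with a hypothetical real_stack_limit parameter standing in for the root-table load):

#include <cstdint>

// Hypothetical constant standing in for V8's kPointerSizeLog2.
constexpr int kPointerSizeLog2 = 3;  // 8-byte stack slots on arm64

// Mirrors `Sub(x10, jssp, limit); Cmp(x10, expected << 3); B(le, ...)`.
bool WouldOverflowStack(uintptr_t stack_pointer,
                        uintptr_t real_stack_limit,
                        uint64_t expected_args) {
  // If the stack is already overflowed this difference goes negative,
  // which is why the generated code relies on a signed comparison too.
  int64_t space_left = static_cast<int64_t>(stack_pointer - real_stack_limit);
  int64_t needed = static_cast<int64_t>(expected_args) << kPointerSizeLog2;
  return space_left <= needed;
}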

1045
deps/v8/src/arm64/code-stubs-arm64.cc

File diff suppressed because it is too large

78
deps/v8/src/arm64/code-stubs-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM64_CODE_STUBS_ARM64_H_
 #define V8_ARM64_CODE_STUBS_ARM64_H_
@@ -39,8 +16,8 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
-  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) { }
+  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+      : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
   void Generate(MacroAssembler* masm);
@@ -79,8 +56,8 @@ class StringHelper : public AllStatic {
 class StoreRegistersStateStub: public PlatformCodeStub {
  public:
-  explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
-      : save_doubles_(with_fp) {}
+  StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+      : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
   static Register to_be_pushed_lr() { return ip0; }
   static void GenerateAheadOfTime(Isolate* isolate);
@@ -95,8 +72,8 @@ class StoreRegistersStateStub: public PlatformCodeStub {
 class RestoreRegistersStateStub: public PlatformCodeStub {
  public:
-  explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
-      : save_doubles_(with_fp) {}
+  RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
+      : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
   static void GenerateAheadOfTime(Isolate* isolate);
  private:
@@ -113,12 +90,14 @@ class RecordWriteStub: public PlatformCodeStub {
   // Stub to record the write of 'value' at 'address' in 'object'.
   // Typically 'address' = 'object' + <some offset>.
   // See MacroAssembler::RecordWriteField() for example.
-  RecordWriteStub(Register object,
+  RecordWriteStub(Isolate* isolate,
+                  Register object,
                   Register value,
                   Register address,
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
-      : object_(object),
+      : PlatformCodeStub(isolate),
+        object_(object),
         value_(value),
         address_(address),
         remembered_set_action_(remembered_set_action),
@@ -210,9 +189,15 @@ class RecordWriteStub: public PlatformCodeStub {
       : object_(object),
         address_(address),
         scratch0_(scratch),
-        saved_regs_(kCallerSaved) {
+        saved_regs_(kCallerSaved),
+        saved_fp_regs_(kCallerSavedFP) {
     ASSERT(!AreAliased(scratch, object, address));
+    // The SaveCallerSaveRegisters method needs to save caller-saved
+    // registers, but we don't bother saving MacroAssembler scratch registers.
+    saved_regs_.Remove(MacroAssembler::DefaultTmpList());
+    saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
     // We would like to require more scratch registers for this stub,
     // but the number of registers comes down to the ones used in
     // FullCodeGen::SetVar(), which is architecture independent.
@@ -223,12 +208,6 @@ class RecordWriteStub: public PlatformCodeStub {
     scratch1_ = Register(pool_available.PopLowestIndex());
     scratch2_ = Register(pool_available.PopLowestIndex());
-    // SaveCallerRegisters method needs to save caller saved register, however
-    // we don't bother saving ip0 and ip1 because they are used as scratch
-    // registers by the MacroAssembler.
-    saved_regs_.Remove(ip0);
-    saved_regs_.Remove(ip1);
     // The scratch registers will be restored by other means so we don't need
     // to save them with the other caller saved registers.
     saved_regs_.Remove(scratch0_);
@@ -253,7 +232,7 @@ class RecordWriteStub: public PlatformCodeStub {
     // register will need to be preserved. Can we improve this?
     masm->PushCPURegList(saved_regs_);
     if (mode == kSaveFPRegs) {
-      masm->PushCPURegList(kCallerSavedFP);
+      masm->PushCPURegList(saved_fp_regs_);
     }
   }
@@ -261,7 +240,7 @@ class RecordWriteStub: public PlatformCodeStub {
     // TODO(all): This can be very expensive, and it is likely that not every
     // register will need to be preserved. Can we improve this?
     if (mode == kSaveFPRegs) {
-      masm->PopCPURegList(kCallerSavedFP);
+      masm->PopCPURegList(saved_fp_regs_);
     }
     masm->PopCPURegList(saved_regs_);
   }
@@ -279,6 +258,7 @@ class RecordWriteStub: public PlatformCodeStub {
   Register scratch1_;
   Register scratch2_;
   CPURegList saved_regs_;
+  CPURegList saved_fp_regs_;
   // TODO(all): We should consider moving this somewhere else.
   static CPURegList GetValidRegistersForAllocation() {
@@ -296,10 +276,7 @@ class RecordWriteStub: public PlatformCodeStub {
     CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
     // We also remove MacroAssembler's scratch registers.
-    list.Remove(ip0);
-    list.Remove(ip1);
-    list.Remove(x8);
-    list.Remove(x9);
+    list.Remove(MacroAssembler::DefaultTmpList());
     return list;
   }
@@ -372,7 +349,7 @@ class RecordWriteStub: public PlatformCodeStub {
 // the exit frame before doing the call with GenerateCall.
 class DirectCEntryStub: public PlatformCodeStub {
  public:
-  DirectCEntryStub() {}
+  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
   void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, Register target);
@@ -388,7 +365,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-  explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
+  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+      : PlatformCodeStub(isolate), mode_(mode) { }
   void Generate(MacroAssembler* masm);
@@ -436,7 +414,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
 class SubStringStub: public PlatformCodeStub {
  public:
-  SubStringStub() {}
+  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
  private:
   Major MajorKey() { return SubString; }
@@ -448,7 +426,7 @@ class SubStringStub: public PlatformCodeStub {
 class StringCompareStub: public PlatformCodeStub {
  public:
-  StringCompareStub() { }
+  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
   // Compares two flat ASCII strings and returns result in x0.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
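The recurring change in this header is mechanical: every stub constructor now takes the Isolate* explicitly and forwards it to PlatformCodeStub, instead of the isolate being looked up from ambient state later. A minimal sketch of that dependency-threading pattern outside V8 (hypothetical Isolate and base-class names, not V8's real hierarchy):

#include <cassert>

struct Isolate {};  // hypothetical stand-in for v8::internal::Isolate

// The base class owns the isolate pointer, as PlatformCodeStub does after
// this change; derived stubs just forward it up at construction time.
class CodeStubBase {
 public:
  explicit CodeStubBase(Isolate* isolate) : isolate_(isolate) {
    assert(isolate != nullptr);
  }
  Isolate* isolate() const { return isolate_; }
 private:
  Isolate* isolate_;
};

class SubStringStubSketch : public CodeStubBase {
 public:
  // Before: SubStringStub() {} -- isolate fetched from a global later.
  // After: the dependency is explicit and testable.
  explicit SubStringStubSketch(Isolate* isolate) : CodeStubBase(isolate) {}
};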

69
deps/v8/src/arm64/codegen-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #include "v8.h"
@@ -339,8 +316,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Non-hole double, copy value into a heap number.
   Register heap_num = x5;
-  __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
-  __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
+  __ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
+                        x13, heap_num_map);
   __ Mov(x13, dst_elements);
   __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
   __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
@@ -373,14 +350,41 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
 }
-bool Code::IsYoungSequence(byte* sequence) {
-  return MacroAssembler::IsYoungSequence(sequence);
+CodeAgingHelper::CodeAgingHelper() {
+  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // The sequence of instructions that is patched out for aging code is the
+  // following boilerplate stack-building prologue that is found both in
+  // FUNCTION and OPTIMIZED_FUNCTION code:
+  PatchingAssembler patcher(young_sequence_.start(),
+                            young_sequence_.length() / kInstructionSize);
+  // The young sequence is the frame setup code for FUNCTION code types. It is
+  // generated by FullCodeGenerator::Generate.
+  MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
+#ifdef DEBUG
+  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
+  ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
+  PatchingAssembler patcher_old(old_sequence_.start(), length);
+  MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
+#endif
+}
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+  return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
+}
+#endif
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+  return MacroAssembler::IsYoungSequence(isolate, sequence);
 }
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                                MarkingParity* parity) {
-  if (IsYoungSequence(sequence)) {
+  if (IsYoungSequence(isolate, sequence)) {
     *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
@@ -395,7 +399,8 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
                                 byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
-  PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
+  PatchingAssembler patcher(sequence,
+                            kNoCodeAgeSequenceLength / kInstructionSize);
   if (age == kNoAgeCodeAge) {
     MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
   } else {
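The new CodeAgingHelper::IsOld above is just a byte-wise comparison of the candidate prologue against a pre-generated "old" sequence, and IsYoungSequence works the same way against the young one. A minimal standalone sketch of that idea (hypothetical byte buffers; in V8 the reference sequences are emitted once through a PatchingAssembler):

#include <cstdint>
#include <cstring>

constexpr size_t kSequenceBytes = 16;

// Hypothetical reference encodings; real values would be the emitted
// frame-setup prologue and the code-age stub call sequence.
const uint8_t kYoungSequence[kSequenceBytes] = {0};
const uint8_t kOldSequencePrefix[kSequenceBytes] = {0};

// Age detection reduces to memcmp against the known byte patterns.
bool IsYoung(const uint8_t* candidate) {
  return std::memcmp(candidate, kYoungSequence, kSequenceBytes) == 0;
}
bool IsOld(const uint8_t* candidate) {
  return std::memcmp(candidate, kOldSequencePrefix, kSequenceBytes) == 0;
}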

27
deps/v8/src/arm64/codegen-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM64_CODEGEN_ARM64_H_
 #define V8_ARM64_CODEGEN_ARM64_H_

34
deps/v8/src/arm64/constants-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM64_CONSTANTS_ARM64_H_
 #define V8_ARM64_CONSTANTS_ARM64_H_
@@ -112,6 +89,8 @@ const unsigned kZeroRegCode = 31;
 const unsigned kJSSPCode = 28;
 const unsigned kSPRegInternalCode = 63;
 const unsigned kRegCodeMask = 0x1f;
+const unsigned kShiftAmountWRegMask = 0x1f;
+const unsigned kShiftAmountXRegMask = 0x3f;
 // Standard machine types defined by AAPCS64.
 const unsigned kByteSize = 8;
 const unsigned kByteSizeInBytes = kByteSize >> 3;
@@ -130,6 +109,7 @@ const unsigned kQuadWordSizeInBytes = kQuadWordSize >> 3;
 // AArch64 floating-point specifics. These match IEEE-754.
 const unsigned kDoubleMantissaBits = 52;
 const unsigned kDoubleExponentBits = 11;
+const unsigned kDoubleExponentBias = 1023;
 const unsigned kFloatMantissaBits = 23;
 const unsigned kFloatExponentBits = 8;
@@ -262,8 +242,8 @@ const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
 enum Condition {
   eq = 0,
   ne = 1,
-  hs = 2,
-  lo = 3,
+  hs = 2, cs = hs,
+  lo = 3, cc = lo,
   mi = 4,
   pl = 5,
   vs = 6,
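The new kDoubleExponentBias constant is the IEEE-754 bias for the 11-bit double exponent field. A quick standalone check of how the three double constants fit together (plain C++, not V8 code):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Constants as defined in the header above.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kDoubleExponentBias = 1023;

// Unbiased exponent of a normal double: stored exponent field minus the bias.
int UnbiasedExponent(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  unsigned raw =
      (bits >> kDoubleMantissaBits) & ((1u << kDoubleExponentBits) - 1);
  return static_cast<int>(raw) - static_cast<int>(kDoubleExponentBias);
}

int main() {
  std::printf("%d\n", UnbiasedExponent(8.0));  // prints 3: 8.0 = 1.0 * 2^3
}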

113
deps/v8/src/arm64/cpu-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 // CPU specific code for arm independent of OS goes here.
@@ -41,28 +18,36 @@ namespace internal {
 bool CpuFeatures::initialized_ = false;
 #endif
 unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
 unsigned CpuFeatures::cross_compile_ = 0;
-// Initialise to smallest possible cache size.
-unsigned CpuFeatures::dcache_line_size_ = 1;
-unsigned CpuFeatures::icache_line_size_ = 1;
+class CacheLineSizes {
+ public:
+  CacheLineSizes() {
+#ifdef USE_SIMULATOR
+    cache_type_register_ = 0;
+#else
+    // Copy the content of the cache type register to a core register.
+    __asm__ __volatile__ ("mrs %[ctr], ctr_el0"  // NOLINT
+                          : [ctr] "=r" (cache_type_register_));
+#endif
+  };
+
+  uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
+  uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
+ private:
+  uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
+    // The cache type register holds the size of the caches as a power of two.
+    return 1 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+  }
-bool CPU::SupportsCrankshaft() {
-  return true;
-}
+  uint32_t cache_type_register_;
+};
 void CPU::FlushICache(void* address, size_t length) {
-  if (length == 0) {
-    return;
-  }
+  if (length == 0) return;
 #ifdef USE_SIMULATOR
   // TODO(all): consider doing some cache simulation to ensure every address
@@ -76,8 +61,9 @@ void CPU::FlushICache(void* address, size_t length) {
   uintptr_t start = reinterpret_cast<uintptr_t>(address);
   // Sizes will be used to generate a mask big enough to cover a pointer.
-  uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
-  uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
+  CacheLineSizes sizes;
+  uintptr_t dsize = sizes.dcache_line_size();
+  uintptr_t isize = sizes.icache_line_size();
   // Cache line sizes are always a power of 2.
   ASSERT(CountSetBits(dsize, 64) == 1);
   ASSERT(CountSetBits(isize, 64) == 1);
@@ -139,26 +125,7 @@ void CPU::FlushICache(void* address, size_t length) {
 }
-void CpuFeatures::Probe() {
-  // Compute I and D cache line size. The cache type register holds
-  // information about the caches.
-  uint32_t cache_type_register = GetCacheType();
-  static const int kDCacheLineSizeShift = 16;
-  static const int kICacheLineSizeShift = 0;
-  static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
-  static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
-  // The cache type register holds the size of the I and D caches as a power of
-  // two.
-  uint32_t dcache_line_size_power_of_two =
-      (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
-  uint32_t icache_line_size_power_of_two =
-      (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
-  dcache_line_size_ = 1 << dcache_line_size_power_of_two;
-  icache_line_size_ = 1 << icache_line_size_power_of_two;
+void CpuFeatures::Probe(bool serializer_enabled) {
   // AArch64 has no configuration options, no further probing is required.
   supported_ = 0;
@@ -168,32 +135,6 @@ void CpuFeatures::Probe(bool serializer_enabled) {
 }
-unsigned CpuFeatures::dcache_line_size() {
-  ASSERT(initialized_);
-  return dcache_line_size_;
-}
-unsigned CpuFeatures::icache_line_size() {
-  ASSERT(initialized_);
-  return icache_line_size_;
-}
-uint32_t CpuFeatures::GetCacheType() {
-#ifdef USE_SIMULATOR
-  // This will lead to a cache with 1 byte long lines, which is fine since the
-  // simulator will not need this information.
-  return 0;
-#else
-  uint32_t cache_type_register;
-  // Copy the content of the cache type register to a core register.
-  __asm__ __volatile__ ("mrs %[ctr], ctr_el0"  // NOLINT
-                        : [ctr] "=r" (cache_type_register));
-  return cache_type_register;
-#endif
-}
 } }  // namespace v8::internal
 #endif  // V8_TARGET_ARCH_ARM64
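The CacheLineSizes class above reads AArch64's CTR_EL0 cache type register and turns its two 4-bit log2 fields (I-cache at bit 0, D-cache at bit 16) into powers of two. A standalone sketch of the field decoding, with the caveat that per the ARM ARM each field counts 4-byte words, so the architectural byte size is 4 << field; the V8 code's 1 << field understates it, which only makes the flush-loop stride smaller and is therefore safely conservative:

#include <cstdint>

// Decodes the line-size fields of AArch64's CTR_EL0 cache type register.
// Field layout (ARM ARM): IminLine = bits [3:0], DminLine = bits [19:16],
// each the log2 of the number of 4-byte words in the smallest cache line.
struct CacheTypeRegister {
  uint32_t value;

  uint32_t IFieldLog2() const { return value & 0xf; }
  uint32_t DFieldLog2() const { return (value >> 16) & 0xf; }

  // Architectural minimum line sizes in bytes.
  uint32_t ICacheLineBytes() const { return 4u << IFieldLog2(); }
  uint32_t DCacheLineBytes() const { return 4u << DFieldLog2(); }
};

// Example: a CTR_EL0 value of 0x84448004 has IminLine = DminLine = 4,
// i.e. 64-byte I- and D-cache lines (4 << 4).
static_assert(4u << (0x84448004u & 0xf) == 64, "64-byte I-cache line");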

52
deps/v8/src/arm64/cpu-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM64_CPU_ARM64_H_
 #define V8_ARM64_CPU_ARM64_H_
@@ -42,7 +19,7 @@ class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -51,15 +28,9 @@ class CpuFeatures : public AllStatic {
     return false;
   };
-  static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
-    ASSERT(initialized_);
-    // There are no optional features for ARM64.
-    return false;
-  }
-  static bool IsSafeForSnapshot(CpuFeature f) {
-    return (IsSupported(f) &&
-            (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
+  // There are no optional features for ARM64.
+  static bool IsSafeForSnapshot(Isolate* isolate, CpuFeature f) {
+    return IsSupported(f);
   }
   // I and D cache line size in bytes.
@@ -81,21 +52,14 @@ class CpuFeatures : public AllStatic {
     return true;
   }
- private:
-  // Return the content of the cache type register.
-  static uint32_t GetCacheType();
-  // I and D cache line size in bytes.
-  static unsigned icache_line_size_;
-  static unsigned dcache_line_size_;
+  static bool SupportsCrankshaft() { return true; }
+ private:
 #ifdef DEBUG
   static bool initialized_;
 #endif
   // This isn't used (and is always 0), but it is required by V8.
-  static unsigned found_by_runtime_probing_only_;
   static unsigned cross_compile_;
   friend class PlatformFeatureScope;

67
deps/v8/src/arm64/debug-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h" #include "v8.h"
@ -38,8 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm) #define __ ACCESS_MASM(masm)
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() { bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo()); return Debug::IsDebugBreakAtReturn(rinfo());
} }
@ -67,7 +42,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5); STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5); PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
byte* entry = byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_return()->entry(); debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
// The first instruction of a patched return sequence must be a load literal // The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code. // loading the address of the debug break return code.
@ -126,7 +101,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4); STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4); PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
byte* entry = byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_slot()->entry(); debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
// The first instruction of a patched debug break slot must be a load literal // The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code. // loading the address of the debug break slot code.
@ -204,7 +179,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Mov(x0, 0); // No arguments. __ Mov(x0, 0); // No arguments.
__ Mov(x1, ExternalReference::debug_break(masm->isolate())); __ Mov(x1, ExternalReference::debug_break(masm->isolate()));
CEntryStub stub(1); CEntryStub stub(masm->isolate(), 1);
__ CallStub(&stub); __ CallStub(&stub);
// Restore the register values from the expression stack. // Restore the register values from the expression stack.
@ -240,6 +215,16 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
} }
void Debug::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
// Register state for CallICStub
// ----------- S t a t e -------------
// -- x1 : function
// -- x3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit() | x3.Bit(), 0, x10);
}
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc). // Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e ------------- // ----------- S t a t e -------------
@ -296,15 +281,6 @@ void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
} }
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
// -- x2 : name
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which // In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with // is an object - this is not generally the case so this should be used with
@ -322,17 +298,6 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
} }
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-arm64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -- x2 : feedback array
// -- x3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
}
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) { void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-arm64.cc). // Calling convention for CallConstructStub (from code-stubs-arm64.cc).
// ----------- S t a t e ------------- // ----------- S t a t e -------------
@ -386,8 +351,6 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
const bool Debug::kFrameDropperSupported = false; const bool Debug::kFrameDropperSupported = false;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64 #endif // V8_TARGET_ARCH_ARM64

27
deps/v8/src/arm64/decoder-arm64-inl.h

@@ -1,29 +1,6 @@
 // Copyright 2014 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM64_DECODER_ARM64_INL_H_
 #define V8_ARM64_DECODER_ARM64_INL_H_

27
deps/v8/src/arm64/decoder-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #include "v8.h"

27
deps/v8/src/arm64/decoder-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 #ifndef V8_ARM64_DECODER_ARM64_H_
 #define V8_ARM64_DECODER_ARM64_H_

86
deps/v8/src/arm64/deoptimizer-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h" #include "v8.h"
@ -138,6 +115,43 @@ Code* Deoptimizer::NotifyStubFailureBuiltin() {
} }
#define __ masm->
static void CopyRegisterDumpToFrame(MacroAssembler* masm,
Register frame,
CPURegList reg_list,
Register scratch1,
Register scratch2,
int src_offset,
int dst_offset) {
int offset0, offset1;
CPURegList copy_to_input = reg_list;
int reg_count = reg_list.Count();
int reg_size = reg_list.RegisterSizeInBytes();
for (int i = 0; i < (reg_count / 2); i++) {
__ PeekPair(scratch1, scratch2, src_offset + (i * reg_size * 2));
offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
offset1 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
if ((offset0 + reg_size) == offset1) {
// Registers are adjacent: store in pairs.
__ Stp(scratch1, scratch2, MemOperand(frame, offset0));
} else {
// Registers are not adjacent: store individually.
__ Str(scratch1, MemOperand(frame, offset0));
__ Str(scratch2, MemOperand(frame, offset1));
}
}
if ((reg_count & 1) != 0) {
__ Peek(scratch1, src_offset + (reg_count - 1) * reg_size);
offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
__ Str(scratch1, MemOperand(frame, offset0));
}
}
#undef __
#define __ masm()-> #define __ masm()->
void Deoptimizer::EntryGenerator::Generate() { void Deoptimizer::EntryGenerator::Generate() {
@ -200,25 +214,13 @@ void Deoptimizer::EntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset())); __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame. // Copy core registers into the input frame.
CPURegList copy_to_input = saved_registers; CopyRegisterDumpToFrame(masm(), x1, saved_registers, x2, x4, 0,
for (int i = 0; i < saved_registers.Count(); i++) { FrameDescription::registers_offset());
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
__ Peek(x2, i * kPointerSize);
CPURegister current_reg = copy_to_input.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Str(x2, MemOperand(x1, offset));
}
// Copy FP registers to the input frame. // Copy FP registers to the input frame.
for (int i = 0; i < saved_fp_registers.Count(); i++) { CopyRegisterDumpToFrame(masm(), x1, saved_fp_registers, x2, x4,
// TODO(all): Look for opportunities to optimize this by using ldp/stp. kFPRegistersOffset,
int dst_offset = FrameDescription::double_registers_offset() + FrameDescription::double_registers_offset());
(i * kDoubleSize);
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
__ Str(x2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack. // Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize)); __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));

30
deps/v8/src/arm64/disasm-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// [23-line BSD license header removed; identical to the block shown in full for builtins-arm64.cc above]
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include <assert.h> #include <assert.h>
#include <stdio.h> #include <stdio.h>
@@ -1632,10 +1609,9 @@ int Disassembler::SubstituteBranchTargetField(Instruction* instr,
offset <<= kInstructionSizeLog2;
char sign = '+';
if (offset < 0) {
offset = -offset;
sign = '-';
}
AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, offset,
AppendToOutput("#%c0x%" PRIx64 " (addr %p)", sign, Abs(offset),
instr->InstructionAtOffset(offset), Instruction::NO_CHECK);
return 8;
}
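The old sequence negated offset in place before printing, which also flipped the value later handed to InstructionAtOffset; keeping offset intact and printing Abs(offset) avoids that. A small sketch of the formatting idiom (invented values, not V8 code):

#include <cinttypes>
#include <cstdio>

int main() {
  int64_t offset = -0x40;  // example branch offset, in bytes
  char sign = (offset < 0) ? '-' : '+';
  uint64_t magnitude = (offset < 0) ? static_cast<uint64_t>(-offset)
                                    : static_cast<uint64_t>(offset);
  std::printf("#%c0x%" PRIx64 "\n", sign, magnitude);  // prints "#-0x40"
  return 0;
}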

27
deps/v8/src/arm64/disasm-arm64.h

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H

27
deps/v8/src/arm64/frames-arm64.cc

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"

27
deps/v8/src/arm64/frames-arm64.h

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "arm64/constants-arm64.h"
#include "arm64/assembler-arm64.h"

307
deps/v8/src/arm64/full-codegen-arm64.cc

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h" #include "v8.h"
@ -117,10 +94,14 @@ static void EmitStackCheck(MacroAssembler* masm_,
Label ok; Label ok;
ASSERT(jssp.Is(__ StackPointer())); ASSERT(jssp.Is(__ StackPointer()));
ASSERT(scratch.Is(jssp) == (pointers == 0)); ASSERT(scratch.Is(jssp) == (pointers == 0));
Heap::RootListIndex index;
if (pointers != 0) { if (pointers != 0) {
__ Sub(scratch, jssp, pointers * kPointerSize); __ Sub(scratch, jssp, pointers * kPointerSize);
index = Heap::kRealStackLimitRootIndex;
} else {
index = Heap::kStackLimitRootIndex;
} }
__ CompareRoot(scratch, Heap::kStackLimitRootIndex); __ CompareRoot(scratch, index);
__ B(hs, &ok); __ B(hs, &ok);
PredictableCodeSizeScope predictable(masm_, PredictableCodeSizeScope predictable(masm_,
Assembler::kCallSizeWithRelocation); Assembler::kCallSizeWithRelocation);
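The hunk now picks between two root-list limits: when the check probes scratch = jssp - pointers * kPointerSize it compares against the real stack limit, while a bare jssp check keeps using the interrupt-capable stack limit, which can be raised so the next check drops into the runtime. A toy model of that two-limit scheme (values and behaviour simplified; not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t real_limit = 0x10000;    // actual end of usable stack
  uintptr_t interrupt_limit = real_limit;  // normally equal to the real limit
  interrupt_limit = UINTPTR_MAX;           // "raised" to request an interrupt

  uintptr_t sp = 0x80000;
  bool interrupt_requested = sp < interrupt_limit;  // bare-sp check now fires
  bool frame_fits = (sp - 64 * 8) >= real_limit;    // probe for 64 slots
  std::printf("interrupt=%d frame_fits=%d\n", interrupt_requested, frame_fits);
  return 0;
}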
@@ -148,8 +129,6 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
InitializeFeedbackVector();
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -237,7 +216,7 @@ void FullCodeGenerator::Generate() {
__ Push(x1, x10);
__ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
} else {
__ Push(x1);
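This is the first of many hunks in this file with the same mechanical change: code stubs now receive the Isolate* in their constructor, and stub.GetCode() is later called without an argument (visible below in EmitInlineSmiBinaryOp). A toy model of the constructor change, not the real V8 classes:

struct Isolate {};

class CodeStub {
 public:
  explicit CodeStub(Isolate* isolate) : isolate_(isolate) {}
  // The old shape was roughly GetCode(Isolate*); binding the isolate at
  // construction lets every later call site drop the argument.
  int GetCode() const { return isolate_ != nullptr ? 42 : 0; }  // stand-in
 private:
  Isolate* isolate_;
};

int main() {
  Isolate isolate;
  CodeStub stub(&isolate);  // isolate bound once, up front
  return stub.GetCode() == 42 ? 0 : 1;
}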
@@ -297,7 +276,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, x0, x1, x2);
@@ -387,7 +366,12 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label ok;
ASSERT(back_edge_target->is_bound());
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
// We want to do a round rather than a floor of distance/kCodeSizeMultiplier
// to reduce the absolute error due to the integer division. To do that,
// we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
// the result).
int distance =
masm_->SizeOfCodeGeneratedSince(back_edge_target) + kCodeSizeMultiplier / 2;
int weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
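The added comment describes a standard integer trick: adding half the divisor before dividing turns floor division into round-to-nearest. Worked numbers, with an assumed multiplier (kCodeSizeMultiplier is platform-specific):

#include <cstdio>

int main() {
  const int kCodeSizeMultiplier = 12;  // assumed value, for illustration only
  const int distance = 31;             // 31 / 12 = 2.58..., should round to 3
  int floored = distance / kCodeSizeMultiplier;                              // 2
  int rounded = (distance + kCodeSizeMultiplier / 2) / kCodeSizeMultiplier;  // 3
  std::printf("floor=%d round=%d\n", floored, rounded);
  return 0;
}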
@@ -430,7 +414,7 @@ void FullCodeGenerator::EmitReturnSequence() {
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else {
int distance = masm_->pc_offset();
int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
}
@@ -1195,12 +1179,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register x0. Iterate through that.
__ Bind(&fixed_array);
Handle<Object> feedback = Handle<Object>(
Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
isolate());
StoreFeedbackVectorSlot(slot, feedback);
__ LoadObject(x1, FeedbackVector());
__ Mov(x10, Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
__ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
@@ -1359,7 +1339,9 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->strict_mode(), info->is_generator());
FastNewClosureStub stub(isolate(),
info->strict_mode(),
info->is_generator());
__ Mov(x2, Operand(info));
__ CallStub(&stub);
} else {
@@ -1672,13 +1654,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
int properties_count = constant_properties->length() / 2;
const int max_cloned_properties =
FastCloneShallowObjectStub::kMaximumClonedProperties;
if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
if (expr->may_store_doubles() || expr->depth() > 1 ||
Serializer::enabled(isolate()) || flags != ObjectLiteral::kFastElements ||
properties_count > max_cloned_properties) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
@@ -1816,13 +1798,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
isolate(),
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
} else if ((expr->depth() > 1) || Serializer::enabled() ||
} else if ((expr->depth() > 1) || Serializer::enabled(isolate()) ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ Mov(x0, Smi::FromInt(flags));
__ Push(x3, x2, x1, x0);
@@ -1837,7 +1820,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
FastCloneShallowArrayStub stub(isolate(),
mode,
allocation_site_mode,
length);
__ CallStub(&stub);
}
@@ -1869,7 +1855,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
__ Mov(x3, Smi::FromInt(i));
StoreArrayLiteralElementStub stub;
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1886,7 +1872,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
ASSERT(expr->target()->IsValidLeftHandSide());
ASSERT(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
@@ -2030,10 +2016,10 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(x10, &both_smis);
__ Bind(&stub_call);
BinaryOpICStub stub(op, mode);
BinaryOpICStub stub(isolate(), op, mode);
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
__ B(&done);
@@ -2115,11 +2101,11 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ Pop(x1);
BinaryOpICStub stub(op, mode);
BinaryOpICStub stub(isolate(), op, mode);
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
context()->Plug(x0);
@@ -2127,7 +2113,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
ASSERT(expr->IsValidLeftHandSide());
ASSERT(expr->IsValidReferenceExpression());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2333,16 +2319,15 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithIC(Call* expr) {
ASM_LOCATION("EmitCallWithIC");
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
CallFunctionFlags flags;
CallIC::CallType call_type = callee->IsVariableProxy()
? CallIC::FUNCTION
: CallIC::METHOD;
// Get the target function.
if (callee->IsVariableProxy()) {
if (call_type == CallIC::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
@@ -2350,7 +2335,6 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
// Load the function from the receiver.
ASSERT(callee->IsProperty());
@@ -2360,40 +2344,19 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
// Push the target function under the receiver.
__ Pop(x10);
__ Push(x0, x10);
flags = CALL_AS_METHOD;
}
// Load the arguments.
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, flags);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
EmitCall(expr, call_type);
}
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
Expression* callee = expr->expression();
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
// Load the function from the receiver.
ASSERT(callee->IsProperty());
@@ -2405,28 +2368,12 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
__ Pop(x10);
__ Push(x0, x10);
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, CALL_AS_METHOD);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
EmitCall(expr, CallIC::METHOD);
}
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
// Code common for calls using the call stub.
void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
@@ -2434,19 +2381,17 @@ void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
VisitForStackValue(args->at(i));
}
}
// Record source position for debugger.
// Record source position of the IC call.
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
TypeFeedbackInfo::UninitializedSentinel(isolate());
StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
__ LoadObject(x2, FeedbackVector());
Handle<Code> ic = CallIC::initialize_stub(
isolate(), arg_count, call_type);
__ Mov(x3, Smi::FromInt(expr->CallFeedbackSlot()));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ CallStub(&stub);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
RecordJSReturnSite(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2529,7 +2474,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(expr->position());
// Call the evaluated function.
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ CallStub(&stub);
RecordJSReturnSite(expr);
@@ -2538,7 +2483,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
context()->DropAndPlug(1, x0);
} else if (call_type == Call::GLOBAL_CALL) {
EmitCallWithIC(expr);
EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
@@ -2578,16 +2523,16 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// The receiver is either the global receiver or an object found
// by LoadContextSlot.
EmitCallWithStub(expr);
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (property->key()->IsPropertyName()) {
EmitCallWithIC(expr);
EmitCallWithLoadIC(expr);
} else {
EmitKeyedCallWithIC(expr, property->key());
EmitKeyedCallWithLoadIC(expr, property->key());
}
} else {
@@ -2599,7 +2544,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
__ Push(x1);
// Emit function call.
EmitCallWithStub(expr);
EmitCall(expr);
}
#ifdef DEBUG
@@ -2636,12 +2581,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Peek(x1, arg_count * kXRegSize);
// Record call targets in unoptimized code.
Handle<Object> uninitialized =
TypeFeedbackInfo::UninitializedSentinel(isolate());
StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
if (FLAG_pretenuring_call_new) {
StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
isolate()->factory()->NewAllocationSite());
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
ASSERT(expr->AllocationSiteFeedbackSlot() ==
expr->CallNewFeedbackSlot() + 1);
}
@@ -2649,8 +2590,8 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ LoadObject(x2, FeedbackVector());
__ Mov(x3, Smi::FromInt(expr->CallNewFeedbackSlot()));
CallConstructStub stub(RECORD_CALL_TARGET);
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(x0);
}
@@ -3033,7 +2974,7 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
__ Mov(x1, x0);
__ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(x0);
}
@@ -3124,31 +3065,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
}
void FullCodeGenerator::EmitLog(CallRuntime* expr) {
// Conditionally generate a log call.
// Args:
// 0 (literal string): The type of logging (corresponds to the flags).
// This is used to determine whether or not to generate the log call.
// 1 (string): Format string. Access the string at argument index 2
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(args->length(), 3);
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
context()->Plug(x0);
}
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
SubStringStub stub;
SubStringStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3161,7 +3080,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
RegExpExecStub stub;
RegExpExecStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 4);
VisitForStackValue(args->at(0));
@@ -3303,7 +3222,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
MathPowStub stub(MathPowStub::ON_STACK);
MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(x0);
}
@@ -3345,7 +3264,7 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
// Load the argument into x0 and call the stub.
VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub;
NumberToStringStub stub(isolate());
__ CallStub(&stub);
context()->Plug(x0);
}
@@ -3473,7 +3392,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(1));
__ Pop(x1);
StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(x0);
@@ -3486,32 +3405,12 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
StringCompareStub stub;
StringCompareStub stub(isolate());
__ CallStub(&stub);
context()->Plug(x0);
}
void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_log, 1);
context()->Plug(x0);
}
void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
// Load the argument on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallRuntime(Runtime::kMath_sqrt, 1);
context()->Plug(x0);
}
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
ZoneList<Expression*>* args = expr->arguments();
@@ -3545,7 +3444,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub;
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
@@ -3889,7 +3788,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS);
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kPointerSize);
__ CallStub(&stub);
@@ -4021,7 +3920,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
ASSERT(expr->expression()->IsValidLeftHandSide());
ASSERT(expr->expression()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
@@ -4107,7 +4006,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ B(&stub_call);
__ Bind(&slow);
}
ToNumberStub convert_stub;
ToNumberStub convert_stub(isolate());
__ CallStub(&convert_stub);
// Save result for postfix expressions.
@@ -4139,8 +4038,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
Assembler::BlockPoolsScope scope(masm_);
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}
__ Bind(&done);
@@ -4254,13 +4153,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (check->Equals(isolate()->heap()->number_string())) {
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string");
__ JumpIfSmi(x0, if_true);
__ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
__ CompareRoot(x0, Heap::kHeapNumberMapRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->string_string())) {
} else if (String::Equals(check, factory->string_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string");
__ JumpIfSmi(x0, if_false);
// Check for undetectable objects => false.
@@ -4268,22 +4168,22 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
__ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false,
fall_through);
} else if (check->Equals(isolate()->heap()->symbol_string())) {
} else if (String::Equals(check, factory->symbol_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string");
__ JumpIfSmi(x0, if_false);
__ CompareObjectType(x0, x0, x1, SYMBOL_TYPE);
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->boolean_string())) {
} else if (String::Equals(check, factory->boolean_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string");
__ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
__ CompareRoot(x0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
check->Equals(isolate()->heap()->null_string())) {
String::Equals(check, factory->null_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
__ CompareRoot(x0, Heap::kNullValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->undefined_string())) {
} else if (String::Equals(check, factory->undefined_string())) {
ASM_LOCATION(
"FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
@@ -4293,7 +4193,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
__ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true,
fall_through);
} else if (check->Equals(isolate()->heap()->function_string())) {
} else if (String::Equals(check, factory->function_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
__ JumpIfSmi(x0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -4301,7 +4201,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
fall_through);
} else if (check->Equals(isolate()->heap()->object_string())) {
} else if (String::Equals(check, factory->object_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
__ JumpIfSmi(x0, if_false);
if (!FLAG_harmony_typeof) {
@@ -4360,7 +4260,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
InstanceofStub stub(InstanceofStub::kNoFlags);
InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
@@ -4568,7 +4468,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize);
CallFunctionStub stub(1, CALL_AS_METHOD);
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -4721,7 +4621,7 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label gc_required;
Label allocated;
Handle<Map> map(isolate()->native_context()->generator_result_map());
Handle<Map> map(isolate()->native_context()->iterator_result_map());
// Allocate and populate an object with this form: { value: VAL, done: DONE }
@@ -4740,22 +4640,23 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Register result_value = x2;
Register boolean_done = x3;
Register empty_fixed_array = x4;
Register untagged_result = x5;
__ Mov(map_reg, Operand(map));
__ Pop(result_value);
__ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
__ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
// TODO(jbramley): Use Stp if possible.
__ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset));
__ Str(empty_fixed_array,
FieldMemOperand(result, JSObject::kPropertiesOffset));
__ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset));
__ Str(result_value,
FieldMemOperand(result,
JSGeneratorObject::kResultValuePropertyOffset));
__ Str(boolean_done,
FieldMemOperand(result,
JSGeneratorObject::kResultDonePropertyOffset));
STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
JSObject::kElementsOffset);
STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
JSGeneratorObject::kResultDonePropertyOffset);
__ ObjectUntag(untagged_result, result);
__ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
__ Stp(empty_fixed_array, empty_fixed_array,
MemOperand(untagged_result, JSObject::kPropertiesOffset));
__ Stp(result_value, boolean_done,
MemOperand(untagged_result,
JSGeneratorObject::kResultValuePropertyOffset));
// Only the value field needs a write barrier, as the other values are in the
// root set.
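The rewrite collapses the single str stores into stp pair stores through an untagged pointer, and the new STATIC_ASSERTs pin down the field adjacency the pairing depends on. The same guard in standard C++, over an invented struct (not the V8 object layout):

#include <cstddef>

struct IteratorResult {
  void* map;
  void* properties;
  void* elements;  // must sit one pointer after 'properties' for a pair store
  void* value;
  void* done;      // must sit one pointer after 'value' for a pair store
};

static_assert(offsetof(IteratorResult, properties) + sizeof(void*) ==
                  offsetof(IteratorResult, elements),
              "properties/elements must be adjacent for a paired store");
static_assert(offsetof(IteratorResult, value) + sizeof(void*) ==
                  offsetof(IteratorResult, done),
              "value/done must be adjacent for a paired store");

int main() { return 0; }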
@@ -4835,8 +4736,9 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ Mov(x11, has_pending_message);
__ Ldr(x11, MemOperand(x11));
__ Ldrb(x11, MemOperand(x11));
__ SmiTag(x11);
__ Push(x10, x11);
@@ -4864,7 +4766,8 @@ void FullCodeGenerator::ExitFinallyBlock() {
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Mov(x13, has_pending_message);
__ Str(x11, MemOperand(x13));
STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
__ Strb(x11, MemOperand(x13));
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
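Both hunks fix the access width: has_pending_message lives in a C++ bool on the host, so the generated code must read and write exactly one byte; a 64-bit Ldr/Str would touch the seven bytes next to it. A plain C++ sketch of why the width matters (invented memory layout):

#include <cstdio>
#include <cstring>

int main() {
  static_assert(sizeof(bool) == 1, "generated code accesses a single byte");
  unsigned char memory[8] = {0x00, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11};
  bool flag = true;
  std::memcpy(&memory[0], &flag, sizeof(flag));  // byte store, like Strb
  // The neighbouring byte is untouched; an 8-byte store would have zeroed it.
  std::printf("flag=%u neighbour=0x%X\n", memory[0], memory[1]);
  return 0;
}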

28
deps/v8/src/arm64/ic-arm64.cc

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
@@ -1044,7 +1021,6 @@ static void KeyedStoreGenerateGenericHelper(
elements,
x10,
d0,
d1,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.

42
deps/v8/src/arm64/instructions-arm64.cc

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "v8.h"
@@ -254,11 +231,18 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
ASSERT(Mask(PCRelAddressingMask) == ADR);
ASSERT(IsAdr());
Instr imm = Assembler::ImmPCRelAddress(DistanceTo(target));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
int target_offset = DistanceTo(target);
Instr imm;
if (Instruction::IsValidPCRelOffset(target_offset)) {
imm = Assembler::ImmPCRelAddress(target_offset);
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(this,
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target);
}
}
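ADR encodes a signed 21-bit byte offset, so the patcher first checks that the distance still fits and otherwise emits the multi-instruction far sequence via PatchAdrFar. A sketch of an is_int21-style range check (plain C++, mirroring what IsValidPCRelOffset appears to test):

#include <cstdint>
#include <cstdio>

static bool IsInt21(int64_t x) {
  return x >= -(INT64_C(1) << 20) && x < (INT64_C(1) << 20);
}

int main() {
  std::printf("%d %d %d\n",
              IsInt21(0),                       // 1: fits in the immediate
              IsInt21((INT64_C(1) << 20) - 1),  // 1: largest encodable value
              IsInt21(INT64_C(1) << 20));       // 0: takes the far-patch path
  return 0;
}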

46
deps/v8/src/arm64/instructions-arm64.h

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
#define V8_ARM64_INSTRUCTIONS_ARM64_H_
@@ -160,9 +137,10 @@ class Instruction {
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
ASSERT(IsPCRelAddressing());
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width-1, 0, offset);
return signed_bitextract_32(width - 1, 0, offset);
}
uint64_t ImmLogical();
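ImmPCRel reassembles the immediate from its split hi/lo fields and then sign-extends the 21-bit result; the new ASSERT just narrows the accessor to PC-relative forms. A sketch of the sign-extension idiom behind signed_bitextract_32 (assumed semantics, not the V8 helper itself):

#include <cstdint>
#include <cstdio>

static int32_t SignExtend(uint32_t value, int width) {
  const int shift = 32 - width;
  // Shift the field's sign bit up to bit 31, then arithmetic-shift back down.
  return static_cast<int32_t>(value << shift) >> shift;
}

int main() {
  std::printf("%d %d\n",
              SignExtend(0x1FFFFF, 21),   // all 21 bits set -> -1
              SignExtend(0x0FFFFF, 21));  // top field bit clear -> 1048575
  return 0;
}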
@@ -191,6 +169,10 @@ class Instruction {
return Mask(TestBranchFMask) == TestBranchFixed;
}
bool IsImmBranch() const {
return BranchType() != UnknownBranchType;
}
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
@@ -203,6 +185,10 @@ class Instruction {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsAdr() const {
return Mask(PCRelAddressingMask) == ADR;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
@@ -211,6 +197,10 @@ class Instruction {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubShifted() const {
return Mask(AddSubShiftedFMask) == AddSubShiftedFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
@@ -387,6 +377,10 @@ class Instruction {
}
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(int offset) {
return is_int21(offset);
}
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};

27
deps/v8/src/arm64/instrument-arm64.cc

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "arm64/instrument-arm64.h"

27
deps/v8/src/arm64/instrument-arm64.h

@@ -1,29 +1,6 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
#define V8_ARM64_INSTRUMENT_ARM64_H_

436
deps/v8/src/arm64/lithium-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #include "v8.h"
@@ -515,6 +492,8 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
       !hinstr->HasObservableSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
   }

   return instr;
@@ -541,6 +520,19 @@ LUnallocated* LChunkBuilder::TempRegister() {
 }

+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
 int LPlatformChunk::GetNextSpillIndex() {
   return spill_slot_count_++;
 }
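The new TempDoubleRegister() mirrors TempRegister(): it asks the register allocator for a fresh virtual register and falls back to vreg 0 after signalling Abort when the virtual-register space is exhausted. A minimal standalone model of that allocation pattern (all types here are invented for illustration, not V8's):

    #include <cstdio>

    struct Allocator {
      static const int kMaxVirtualRegisters = 1 << 18;
      int next_vreg = 0;
      bool ok = true;
      int GetVirtualRegister() {
        if (next_vreg >= kMaxVirtualRegisters) ok = false;  // allocation failed
        return next_vreg++;
      }
    };

    int main() {
      Allocator allocator;
      int vreg = allocator.GetVirtualRegister();
      if (!allocator.ok) vreg = 0;  // mirrors the Abort(...) fallback above
      std::printf("allocated vreg %d\n", vreg);
      return 0;
    }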
@@ -702,7 +694,8 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   // the it was just a plain use), so it is free to move the split child into
   // the same register that is used for the use-at-start.
   // See https://code.google.com/p/chromium/issues/detail?id=201590
-  if (!(instr->ClobbersRegisters() && instr->ClobbersDoubleRegisters())) {
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
     int fixed = 0;
     int used_at_start = 0;
     for (UseIterator it(instr); !it.Done(); it.Advance()) {
@@ -846,6 +839,12 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(instr->representation()));
     ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right =
         UseRegisterOrConstantAtStart(instr->BetterRightOperand());
@@ -926,6 +925,11 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
     ASSERT(instr->right()->representation().Equals(instr->representation()));
     ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));

+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right =
         UseRegisterOrConstantAtStart(instr->BetterRightOperand());
@@ -947,9 +951,16 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
-  LOperand* length = UseRegister(instr->length());
-  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
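The rewritten DoBoundsCheck no longer deoptimizes unconditionally: a check that Hydrogen proved redundant (skip_check()) is dropped entirely in release builds and kept only as an assertion under --debug-code. A hypothetical model of that three-way decision (invented types, only the control flow of the hunk above):

    #include <cassert>

    struct BoundsCheck { bool provably_in_range; };
    enum class Emit { kNothing, kAssertionOnly, kDeoptingCheck };

    static Emit Lower(const BoundsCheck& check, bool debug_code) {
      if (!debug_code && check.provably_in_range) return Emit::kNothing;
      return check.provably_in_range ? Emit::kAssertionOnly
                                     : Emit::kDeoptingCheck;
    }

    int main() {
      assert(Lower({true}, false) == Emit::kNothing);
      assert(Lower({true}, true) == Emit::kAssertionOnly);
      assert(Lower({false}, false) == Emit::kDeoptingCheck);
      return 0;
    }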
@@ -1074,63 +1085,59 @@ LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
 LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   Representation from = instr->from();
   Representation to = instr->to();
+  HValue* val = instr->value();
   if (from.IsSmi()) {
     if (to.IsTagged()) {
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       return DefineSameAsFirst(new(zone()) LDummyUse(value));
     }
     from = Representation::Tagged();
   }
   if (from.IsTagged()) {
     if (to.IsDouble()) {
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       LOperand* temp = TempRegister();
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
-      return AssignEnvironment(DefineAsRegister(res));
+      LInstruction* result =
+          DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
     } else if (to.IsSmi()) {
-      LOperand* value = UseRegister(instr->value());
-      if (instr->value()->type().IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
         return DefineSameAsFirst(new(zone()) LDummyUse(value));
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
       ASSERT(to.IsInteger32());
-      LInstruction* res = NULL;
-      if (instr->value()->type().IsSmi() ||
-          instr->value()->representation().IsSmi()) {
-        LOperand* value = UseRegisterAtStart(instr->value());
-        res = DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
       } else {
-        LOperand* value = UseRegister(instr->value());
+        LOperand* value = UseRegister(val);
         LOperand* temp1 = TempRegister();
-        LOperand* temp2 = instr->CanTruncateToInt32() ? NULL : FixedTemp(d24);
-        res = DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
-        res = AssignEnvironment(res);
+        LOperand* temp2 = instr->CanTruncateToInt32()
+            ? NULL : TempDoubleRegister();
+        LInstruction* result =
+            DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
       }
-      return res;
     }
   } else if (from.IsDouble()) {
     if (to.IsTagged()) {
       info()->MarkAsDeferredCalling();
-      LOperand* value = UseRegister(instr->value());
+      LOperand* value = UseRegister(val);
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
       LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
       return AssignPointerMap(DefineAsRegister(result));
     } else {
       ASSERT(to.IsSmi() || to.IsInteger32());
-      LOperand* value = UseRegister(instr->value());
       if (instr->CanTruncateToInt32()) {
-        LTruncateDoubleToIntOrSmi* result =
-            new(zone()) LTruncateDoubleToIntOrSmi(value);
-        return DefineAsRegister(result);
+        LOperand* value = UseRegister(val);
+        return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
       } else {
+        LOperand* value = UseRegister(val);
         LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
         return AssignEnvironment(DefineAsRegister(result));
       }
@@ -1138,37 +1145,35 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
   } else if (from.IsInteger32()) {
     info()->MarkAsDeferredCalling();
     if (to.IsTagged()) {
-      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
-        LOperand* value = UseRegister(instr->value());
-        LNumberTagU* result = new(zone()) LNumberTagU(value,
-                                                      TempRegister(),
-                                                      TempRegister());
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegister(val);
+        LNumberTagU* result =
+            new(zone()) LNumberTagU(value, TempRegister(), TempRegister());
+        return AssignPointerMap(DefineAsRegister(result));
       } else {
         STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
                       (kMaxInt == Smi::kMaxValue));
-        LOperand* value = UseRegisterAtStart(instr->value());
+        LOperand* value = UseRegisterAtStart(val);
         return DefineAsRegister(new(zone()) LSmiTag(value));
       }
     } else if (to.IsSmi()) {
-      LOperand* value = UseRegisterAtStart(instr->value());
+      LOperand* value = UseRegisterAtStart(val);
       LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
-      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+      if (val->CheckFlag(HInstruction::kUint32)) {
        result = AssignEnvironment(result);
       }
       return result;
     } else {
       ASSERT(to.IsDouble());
-      if (instr->value()->CheckFlag(HInstruction::kUint32)) {
+      if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(
-            new(zone()) LUint32ToDouble(UseRegisterAtStart(instr->value())));
+            new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
       } else {
         return DefineAsRegister(
-            new(zone()) LInteger32ToDouble(UseRegisterAtStart(instr->value())));
+            new(zone()) LInteger32ToDouble(UseRegisterAtStart(val)));
       }
     }
   }
   UNREACHABLE();
   return NULL;
 }
@@ -1189,27 +1194,23 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
 LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
-  if (instr->CanOmitMapChecks()) {
-    // LCheckMaps does nothing in this case.
-    return new(zone()) LCheckMaps(NULL);
-  } else {
-    LOperand* value = UseRegisterAtStart(instr->value());
-    LOperand* temp = TempRegister();
-
-    if (instr->has_migration_target()) {
-      info()->MarkAsDeferredCalling();
-      LInstruction* result = new(zone()) LCheckMaps(value, temp);
-      return AssignPointerMap(AssignEnvironment(result));
-    } else {
-      return AssignEnvironment(new(zone()) LCheckMaps(value, temp));
-    }
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
   }
+  return result;
 }

 LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->IsHeapObject()) result = AssignEnvironment(result);
+  return result;
 }
@@ -1232,7 +1233,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
     return AssignEnvironment(
         DefineAsRegister(new(zone()) LClampTToUint8(reg,
                                                     TempRegister(),
-                                                    FixedTemp(d24))));
+                                                    TempDoubleRegister())));
   }
 }
@@ -1249,8 +1250,9 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
 LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
     HCompareNumericAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(r));
     ASSERT(instr->right()->representation().Equals(r));
@@ -1418,8 +1420,12 @@ LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
       ? NULL : TempRegister();
-  LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
-  return AssignEnvironment(DefineAsRegister(div));
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
@@ -1447,6 +1453,7 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
 LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
   HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
                                                instr->arguments_count(),
@@ -1622,7 +1629,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   LInstruction* result =
       DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
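Both this hunk and the matching DoStoreContextSlot change further down gate the deopt environment on DeoptimizesOnHole(): a hole check that merely substitutes undefined can never bail out, so it no longer pays for an environment. A tiny model of the predicate (invented types):

    #include <cassert>

    struct SlotAccess { bool requires_hole_check; bool deoptimizes_on_hole; };

    // An environment is only needed when hitting the hole actually deopts.
    static bool NeedsEnvironment(const SlotAccess& a) {
      return a.requires_hole_check && a.deoptimizes_on_hole;
    }

    int main() {
      assert(!NeedsEnvironment({true, false}));  // hole -> undefined: no deopt
      assert(NeedsEnvironment({true, true}));    // hole -> deoptimize
      return 0;
    }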
@@ -1656,7 +1666,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
   ASSERT(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* elements = UseRegister(instr->elements());
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* key = UseRegisterOrConstant(instr->key());

   if (!instr->is_typed_elements()) {
     if (instr->representation().IsDouble()) {
@@ -1687,17 +1697,14 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
            IsDoubleOrFloatElementsKind(instr->elements_kind())));

     LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
-    LLoadKeyedExternal* result =
-        new(zone()) LLoadKeyedExternal(elements, key, temp);
-    // An unsigned int array load might overflow and cause a deopt. Make sure it
-    // has an environment.
-    if (instr->RequiresHoleCheck() ||
-        elements_kind == EXTERNAL_UINT32_ELEMENTS ||
-        elements_kind == UINT32_ELEMENTS) {
-      return AssignEnvironment(DefineAsRegister(result));
-    } else {
-      return DefineAsRegister(result);
+    LInstruction* result = DefineAsRegister(
+        new(zone()) LLoadKeyedExternal(elements, key, temp));
+    if ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+         elements_kind == UINT32_ELEMENTS) &&
+        !instr->CheckFlag(HInstruction::kUint32)) {
+      result = AssignEnvironment(result);
     }
+    return result;
   }
 }
@@ -1885,13 +1892,10 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
     bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
     bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
-    bool needs_environment = can_overflow || bailout_on_minus_zero;

     HValue* least_const = instr->BetterLeftOperand();
     HValue* most_const = instr->BetterRightOperand();

-    LOperand* left;
-
     // LMulConstI can handle a subset of constants:
     // With support for overflow detection:
     //   -1, 0, 1, 2
@@ -1911,26 +1915,27 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
                        IsPowerOf2(constant_abs - 1))))) {
       LConstantOperand* right = UseConstant(most_const);
       bool need_register = IsPowerOf2(constant_abs) && !small_constant;
-      left = need_register ? UseRegister(least_const)
-                           : UseRegisterAtStart(least_const);
-      LMulConstIS* mul = new(zone()) LMulConstIS(left, right);
-      if (needs_environment) AssignEnvironment(mul);
-      return DefineAsRegister(mul);
+      LOperand* left = need_register ? UseRegister(least_const)
+                                     : UseRegisterAtStart(least_const);
+      LInstruction* result =
+          DefineAsRegister(new(zone()) LMulConstIS(left, right));
+      if ((bailout_on_minus_zero && constant <= 0) || can_overflow) {
+        result = AssignEnvironment(result);
+      }
+      return result;
     }
   }

-  left = UseRegisterAtStart(least_const);
   // LMulI/S can handle all cases, but it requires that a register is
   // allocated for the second operand.
-  LInstruction* result;
-  if (instr->representation().IsSmi()) {
-    LOperand* right = UseRegisterAtStart(most_const);
-    result = DefineAsRegister(new(zone()) LMulS(left, right));
-  } else {
-    LOperand* right = UseRegisterAtStart(most_const);
-    result = DefineAsRegister(new(zone()) LMulI(left, right));
+  LOperand* left = UseRegisterAtStart(least_const);
+  LOperand* right = UseRegisterAtStart(most_const);
+  LInstruction* result = instr->representation().IsSmi()
+      ? DefineAsRegister(new(zone()) LMulS(left, right))
+      : DefineAsRegister(new(zone()) LMulI(left, right));
+  if ((bailout_on_minus_zero && least_const != most_const) || can_overflow) {
+    result = AssignEnvironment(result);
   }
-  if (needs_environment) AssignEnvironment(result);
   return result;
 } else if (instr->representation().IsDouble()) {
   return DoArithmeticD(Token::MUL, instr);
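The tightened minus-zero conditions are worth spelling out: an integer multiply only needs the -0 bailout when the result could actually be -0, which requires a zero times a (possibly) negative factor, so multiplying by a known positive constant is safe. A short illustration of the hazard itself:

    #include <cmath>
    #include <cstdio>

    int main() {
      // In doubles, 0 * -5 is -0.0, which has no int32 representation;
      // this is the case the remaining AssignEnvironment still guards.
      double d = 0.0 * -5.0;
      std::printf("signbit(0.0 * -5.0) = %d\n", std::signbit(d) ? 1 : 0);  // 1
      return 0;
    }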
@@ -1956,7 +1961,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   } else {
     ASSERT(info()->IsStub());
     CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+        info()->code_stub()->GetInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
     Register reg = descriptor->GetParameterRegister(index);
     return DefineFixed(result, reg);
@@ -2045,6 +2050,117 @@ LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
 }

+HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
+                                                                HValue** left) {
+  if (!val->representation().IsInteger32()) return NULL;
+  if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
+
+  HBinaryOperation* hinstr = HBinaryOperation::cast(val);
+  HValue* hleft = hinstr->left();
+  HValue* hright = hinstr->right();
+  ASSERT(hleft->representation().Equals(hinstr->representation()));
+  ASSERT(hright->representation().Equals(hinstr->representation()));
+
+  if ((hright->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
+      (hinstr->IsCommutative() && hleft->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
+    // The constant operand will likely fit in the immediate field. We are
+    // better off with
+    //     lsl x8, x9, #imm
+    //     add x0, x8, #imm2
+    // than with
+    //     mov x16, #imm2
+    //     add x0, x16, x9 LSL #imm
+    return NULL;
+  }
+
+  HBitwiseBinaryOperation* shift = NULL;
+  // TODO(aleram): We will miss situations where a shift operation is used by
+  // different instructions both as a left and right operands.
+  if (hright->IsBitwiseBinaryShift() &&
+      HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hright);
+    if (left != NULL) {
+      *left = hleft;
+    }
+  } else if (hinstr->IsCommutative() &&
+             hleft->IsBitwiseBinaryShift() &&
+             HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hleft);
+    if (left != NULL) {
+      *left = hright;
+    }
+  } else {
+    return NULL;
+  }
+
+  if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
+    // Shifts right by zero can deoptimize.
+    return NULL;
+  }
+
+  return shift;
+}
+
+bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
+  if (!shift->representation().IsInteger32()) {
+    return false;
+  }
+  for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
+    if (shift != CanTransformToShiftedOp(it.value())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
+    HBinaryOperation* instr) {
+  HValue* left;
+  HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
+  if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
+    return DoShiftedBinaryOp(instr, left, shift);
+  }
+  return NULL;
+}
+
+LInstruction* LChunkBuilder::DoShiftedBinaryOp(
+    HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
+  ASSERT(hshift->IsBitwiseBinaryShift());
+  ASSERT(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
+
+  LTemplateResultInstruction<1>* res;
+  LOperand* left = UseRegisterAtStart(hleft);
+  LOperand* right = UseRegisterAtStart(hshift->left());
+  LOperand* shift_amount = UseConstant(hshift->right());
+  Shift shift_op;
+  switch (hshift->opcode()) {
+    case HValue::kShl: shift_op = LSL; break;
+    case HValue::kShr: shift_op = LSR; break;
+    case HValue::kSar: shift_op = ASR; break;
+    default: UNREACHABLE(); shift_op = NO_SHIFT;
+  }
+
+  if (hinstr->IsBitwise()) {
+    res = new(zone()) LBitI(left, right, shift_op, shift_amount);
+  } else if (hinstr->IsAdd()) {
+    res = new(zone()) LAddI(left, right, shift_op, shift_amount);
+  } else {
+    ASSERT(hinstr->IsSub());
+    res = new(zone()) LSubI(left, right, shift_op, shift_amount);
+  }
+  if (hinstr->CheckFlag(HValue::kCanOverflow)) {
+    AssignEnvironment(res);
+  }
+  return DefineAsRegister(res);
+}
+
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsTagged()) {
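The intent of these new builder hooks: on ARM64 the second operand of add/sub/logical instructions can carry a free shift, so a + (b << imm) lowers to a single add x0, x1, x2, LSL #imm instead of a shift followed by an add. A standalone sketch of the arithmetic the fused form computes (not V8 code; the shift count is masked to five bits per JS semantics, and a logical shift right by zero is excluded above because x >>> 0 can yield a value outside the int32 range and must keep its deopt point):

    #include <cassert>
    #include <cstdint>

    // Result of the fused "add with LSL-shifted register operand".
    static int32_t AddWithShiftedOperand(int32_t a, int32_t b, int imm) {
      int shift = imm & 0x1f;  // five-bit shift amount, as in JS
      return static_cast<int32_t>(static_cast<uint32_t>(a) +
                                  (static_cast<uint32_t>(b) << shift));
    }

    int main() {
      // Same value as the two-instruction sequence t = b << 3; a + t.
      assert(AddWithShiftedOperand(10, 4, 3) == 42);
      return 0;
    }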
@@ -2056,6 +2172,10 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
   ASSERT(instr->left()->representation().Equals(instr->representation()));
   ASSERT(instr->right()->representation().Equals(instr->representation()));

+  if (ShiftCanBeOptimizedAway(instr)) {
+    return NULL;
+  }
+
   LOperand* left = instr->representation().IsSmi()
       ? UseRegister(instr->left())
       : UseRegisterAtStart(instr->left());
@@ -2066,8 +2186,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
   int constant_value = 0;
   if (right_value->IsConstant()) {
     right = UseConstant(right_value);
-    HConstant* constant = HConstant::cast(right_value);
-    constant_value = constant->Integer32Value() & 0x1f;
+    constant_value = JSShiftAmountFromHConstant(right_value);
   } else {
     right = UseRegisterAtStart(right_value);
     if (op == Token::ROR) {
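JSShiftAmountFromHConstant (declared in the header hunk further down) centralizes the & 0x1f masking that ECMAScript prescribes for shift counts, replacing the open-coded version deleted here. A standalone illustration of the semantics:

    #include <cassert>
    #include <cstdint>

    // JS only uses the low five bits of a shift count: x << 33 === x << 1.
    static int32_t JsShl(int32_t x, int32_t count) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << (count & 0x1f));
    }

    int main() {
      assert(JsShl(1, 33) == JsShl(1, 1));  // both evaluate to 2
      return 0;
    }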
@@ -2160,7 +2279,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
     value = UseRegister(instr->value());
   }
   LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
@@ -2177,10 +2299,10 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LOperand* temp = NULL;
   LOperand* elements = NULL;
   LOperand* val = NULL;
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());

   if (!instr->is_typed_elements() &&
       instr->value()->representation().IsTagged() &&
@@ -2294,7 +2416,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* context = UseAny(instr->context());
   LStringCharCodeAt* result =
       new(zone()) LStringCharCodeAt(context, string, index);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+  return AssignPointerMap(DefineAsRegister(result));
 }
@@ -2324,6 +2446,12 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(instr->representation()));
     ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
     LOperand *left;
     if (instr->left()->IsConstant() &&
         (HConstant::cast(instr->left())->Integer32Value() == 0)) {
@@ -2365,17 +2493,18 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  LOperand* object = UseRegister(instr->object());
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
     LTransitionElementsKind* result =
         new(zone()) LTransitionElementsKind(object, NULL,
                                             TempRegister(), TempRegister());
     return result;
   } else {
+    LOperand* object = UseFixed(instr->object(), x0);
     LOperand* context = UseFixed(instr->context(), cp);
     LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, context, TempRegister());
-    return AssignPointerMap(result);
+        new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+    return MarkAsCall(result, instr);
   }
 }
@@ -2429,29 +2558,21 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
       LOperand* temp1 = TempRegister();
       LOperand* temp2 = TempRegister();
       LOperand* temp3 = TempRegister();
-      LMathAbsTagged* result =
-          new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3);
-      return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      LInstruction* result = DefineAsRegister(
+          new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3));
+      return AssignEnvironment(AssignPointerMap(result));
     } else {
       LOperand* input = UseRegisterAtStart(instr->value());
-      LMathAbs* result = new(zone()) LMathAbs(input);
-      if (r.IsDouble()) {
-        // The Double case can never fail so it doesn't need an environment.
-        return DefineAsRegister(result);
-      } else {
-        ASSERT(r.IsInteger32() || r.IsSmi());
-        // The Integer32 and Smi cases need an environment because they can
-        // deoptimize on minimum representable number.
-        return AssignEnvironment(DefineAsRegister(result));
-      }
+      LInstruction* result = DefineAsRegister(new(zone()) LMathAbs(input));
+      if (!r.IsDouble()) result = AssignEnvironment(result);
+      return result;
     }
   }
   case kMathExp: {
     ASSERT(instr->representation().IsDouble());
     ASSERT(instr->value()->representation().IsDouble());
     LOperand* input = UseRegister(instr->value());
-    // TODO(all): Implement TempFPRegister.
-    LOperand* double_temp1 = FixedTemp(d24);  // This was chosen arbitrarily.
+    LOperand* double_temp1 = TempDoubleRegister();
     LOperand* temp1 = TempRegister();
     LOperand* temp2 = TempRegister();
     LOperand* temp3 = TempRegister();
@@ -2460,14 +2581,16 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
     return DefineAsRegister(result);
   }
   case kMathFloor: {
-    ASSERT(instr->representation().IsInteger32());
     ASSERT(instr->value()->representation().IsDouble());
-    // TODO(jbramley): ARM64 can easily handle a double argument with frintm,
-    // but we're never asked for it here. At the moment, we fall back to the
-    // runtime if the result doesn't fit, like the other architectures.
     LOperand* input = UseRegisterAtStart(instr->value());
-    LMathFloor* result = new(zone()) LMathFloor(input);
-    return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+    if (instr->representation().IsInteger32()) {
+      LMathFloorI* result = new(zone()) LMathFloorI(input);
+      return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+    } else {
+      ASSERT(instr->representation().IsDouble());
+      LMathFloorD* result = new(zone()) LMathFloorD(input);
+      return DefineAsRegister(result);
+    }
   }
   case kMathLog: {
     ASSERT(instr->representation().IsDouble());
@@ -2483,14 +2606,17 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
     return DefineAsRegister(new(zone()) LMathPowHalf(input));
   }
   case kMathRound: {
-    ASSERT(instr->representation().IsInteger32());
     ASSERT(instr->value()->representation().IsDouble());
-    // TODO(jbramley): As with kMathFloor, we can probably handle double
-    // results fairly easily, but we are never asked for them.
     LOperand* input = UseRegister(instr->value());
-    LOperand* temp = FixedTemp(d24);  // Choosen arbitrarily.
-    LMathRound* result = new(zone()) LMathRound(input, temp);
-    return AssignEnvironment(DefineAsRegister(result));
+    if (instr->representation().IsInteger32()) {
+      LOperand* temp = TempDoubleRegister();
+      LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
+      return AssignEnvironment(DefineAsRegister(result));
+    } else {
+      ASSERT(instr->representation().IsDouble());
+      LMathRoundD* result = new(zone()) LMathRoundD(input);
+      return DefineAsRegister(result);
+    }
   }
   case kMathSqrt: {
     ASSERT(instr->representation().IsDouble());
@@ -2561,7 +2687,9 @@ LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
 LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
   LOperand* object = UseRegisterAtStart(instr->object());
   LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
 }

175
deps/v8/src/arm64/lithium-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ARM64_LITHIUM_ARM64_H_
 #define V8_ARM64_LITHIUM_ARM64_H_
@@ -138,11 +115,13 @@ class LCodeGen;
   V(MathAbsTagged)                             \
   V(MathClz32)                                 \
   V(MathExp)                                   \
-  V(MathFloor)                                 \
+  V(MathFloorD)                                \
+  V(MathFloorI)                                \
   V(MathLog)                                   \
   V(MathMinMax)                                \
   V(MathPowHalf)                               \
-  V(MathRound)                                 \
+  V(MathRoundD)                                \
+  V(MathRoundI)                                \
   V(MathSqrt)                                  \
   V(ModByConstI)                               \
   V(ModByPowerOf2I)                            \
@@ -270,7 +249,9 @@ class LInstruction : public ZoneObject {
   // Interface to the register allocator and iterators.
   bool ClobbersTemps() const { return IsCall(); }
   bool ClobbersRegisters() const { return IsCall(); }
-  virtual bool ClobbersDoubleRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }

   bool IsMarkedAsCall() const { return IsCall(); }
   virtual bool HasResult() const = 0;
@@ -584,7 +565,14 @@ class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
 class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LAddI(LOperand* left, LOperand* right) {
+  LAddI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
@@ -592,8 +580,15 @@ class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }

+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }
+
   DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
   DECLARE_HYDROGEN_ACCESSOR(Add)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
 };
@@ -753,7 +748,14 @@ class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
 class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(LOperand* left, LOperand* right) {
+  LBitI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
@@ -761,10 +763,17 @@ class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }

+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }
+
   Token::Value op() const { return hydrogen()->op(); }

   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
   DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
 };
@@ -887,7 +896,7 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)

-  virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
@@ -927,7 +936,7 @@ class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
 class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
-  explicit LCheckMaps(LOperand* value, LOperand* temp = NULL) {
+  explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
@@ -1324,14 +1333,14 @@ class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
-    inputs_[0] = left;
-    inputs_[1] = right;
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
     temps_[0] = temp;
   }

-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
   LOperand* temp() { return temps_[0]; }

   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
@@ -1930,10 +1939,19 @@ class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
 };

-class LMathFloor V8_FINAL : public LUnaryMathOperation<0> {
+// Math.floor with a double result.
+class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+};
+
+// Math.floor with an integer result.
+class LMathFloorI V8_FINAL : public LUnaryMathOperation<0> {
  public:
-  explicit LMathFloor(LOperand* value) : LUnaryMathOperation<0>(value) { }
-  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
 };
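Splitting LMathFloor into D and I variants reflects a real asymmetry: a double-result floor (one frintm) can never fail, while an integer-result floor must deoptimize when the rounded value has no int32 representation (V8 also bails out on -0, which this sketch omits). A standalone illustration with invented helper names:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Returns false where an integer-result floor would deoptimize.
    static bool FloorToInt32(double x, int32_t* out) {
      double f = std::floor(x);
      if (std::isnan(f) || f < -2147483648.0 || f > 2147483647.0) return false;
      *out = static_cast<int32_t>(f);
      return true;
    }

    int main() {
      int32_t r;
      assert(FloorToInt32(3.7, &r) && r == 3);
      assert(!FloorToInt32(4.0e9, &r));  // out of int32 range: deopt path
      return 0;
    }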
@@ -2029,16 +2047,28 @@ class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
 };

-class LMathRound V8_FINAL : public LUnaryMathOperation<1> {
+// Math.round with a double result.
+class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathRoundD(LOperand* value)
+      : LUnaryMathOperation<0>(value) {
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+};
+
+// Math.round with an integer result.
+class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
  public:
-  LMathRound(LOperand* value, LOperand* temp1)
+  LMathRoundI(LOperand* value, LOperand* temp1)
       : LUnaryMathOperation<1>(value) {
     temps_[0] = temp1;
   }

   LOperand* temp1() { return temps_[0]; }

-  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
 };
@@ -2384,6 +2414,10 @@ class LStoreKeyed : public LTemplateInstruction<0, 3, T> {
   }

   bool NeedsCanonicalization() {
+    if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+        hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+      return false;
+    }
     return this->hydrogen()->NeedsCanonicalization();
   }
   uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
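Canonicalization here means collapsing every NaN bit pattern stored into a double array to the one canonical quiet NaN, so that reserved payloads (such as the pattern V8 uses for the hole) never land in the backing store. The hunk skips it for fresh Add/Sub/Mul/Div results, presumably because IEEE arithmetic cannot produce those reserved payloads. A sketch of the operation being skipped:

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Map every NaN to one fixed quiet-NaN pattern; pass other values through.
    static double Canonicalize(double v) {
      return std::isnan(v) ? std::numeric_limits<double>::quiet_NaN() : v;
    }

    int main() {
      std::printf("%f %f\n", Canonicalize(1.5), Canonicalize(0.0 / 0.0));
      return 0;
    }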
@@ -2500,7 +2534,6 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;

-  Handle<Map> transition() const { return hydrogen()->transition_map(); }
   Representation representation() const {
     return hydrogen()->field_representation();
   }
@@ -2725,7 +2758,14 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
 class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LSubI(LOperand* left, LOperand* right) {
+  LSubI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
@@ -2733,8 +2773,15 @@ class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }

+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }
+
   DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
   DECLARE_HYDROGEN_ACCESSOR(Sub)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
 };
@@ -2778,7 +2825,7 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
                           LOperand* temp1,
-                          LOperand* temp2 = NULL) {
+                          LOperand* temp2) {
     inputs_[0] = object;
     inputs_[1] = context;
     temps_[0] = temp1;
@@ -3042,6 +3089,9 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();

+  // Temporary operand that must be in a double register.
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+
   // Temporary operand that must be in a fixed double register.
   MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
@@ -3075,6 +3125,39 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
   void VisitInstruction(HInstruction* current);
   void DoBasicBlock(HBasicBlock* block);

+  int JSShiftAmountFromHConstant(HValue* constant) {
+    return HConstant::cast(constant)->Integer32Value() & 0x1f;
+  }
+  bool LikelyFitsImmField(HInstruction* instr, int imm) {
+    if (instr->IsAdd() || instr->IsSub()) {
+      return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
+    } else {
+      ASSERT(instr->IsBitwise());
+      unsigned unused_n, unused_imm_s, unused_imm_r;
+      return Assembler::IsImmLogical(imm, kWRegSizeInBits,
+                                     &unused_n, &unused_imm_s, &unused_imm_r);
+    }
+  }
+
+  // Indicates if a sequence of the form
+  //   lsl x8, x9, #imm
+  //   add x0, x1, x8
+  // can be replaced with:
+  //   add x0, x1, x9 LSL #imm
+  // If this is not possible, the function returns NULL. Otherwise it returns a
+  // pointer to the shift instruction that would be optimized away.
+  HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
+                                                   HValue** left = NULL);
+  // Checks if all uses of the shift operation can optimize it away.
+  bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
+  // Attempts to merge the binary operation and an eventual previous shift
+  // operation into a single operation. Returns the merged instruction on
+  // success, and NULL otherwise.
+  LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
+  LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
+                                  HValue* left,
+                                  HBitwiseBinaryOperation* shift);
+
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);

771
deps/v8/src/arm64/lithium-codegen-arm64.cc

File diff suppressed because it is too large

100
deps/v8/src/arm64/lithium-codegen-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.

 #ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
 #define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
@@ -35,7 +12,7 @@
 #include "lithium-codegen.h"
 #include "safepoint-table.h"
 #include "scopes.h"
-#include "v8utils.h"
+#include "utils.h"

 namespace v8 {
 namespace internal {
@@ -60,10 +37,16 @@ class LCodeGen: public LCodeGenBase {
         frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
-        expected_safepoint_kind_(Safepoint::kSimple) {
+        expected_safepoint_kind_(Safepoint::kSimple),
+        after_push_argument_(false),
+        inlined_arguments_(false) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }

+  ~LCodeGen() {
+    ASSERT(!after_push_argument_ || inlined_arguments_);
+  }
+
   // Simple accessors.
   Scope* scope() const { return scope_; }
@@ -98,6 +81,7 @@ class LCodeGen: public LCodeGenBase {
   // information on it.
   void FinishCode(Handle<Code> code);

+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+
   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
@@ -105,9 +89,30 @@ class LCodeGen: public LCodeGenBase {
   Operand ToOperand(LOperand* op);
   Operand ToOperand32I(LOperand* op);
   Operand ToOperand32U(LOperand* op);
-  MemOperand ToMemOperand(LOperand* op) const;
+  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
+  MemOperand ToMemOperand(LOperand* op,
+                          StackMode stack_mode = kCanUseStackPointer) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;

+  template<class LI>
+  Operand ToShiftedRightOperand32I(LOperand* right,
+                                   LI* shift_info) {
+    return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
+  }
+  template<class LI>
+  Operand ToShiftedRightOperand32U(LOperand* right,
+                                   LI* shift_info) {
+    return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
+  }
+  template<class LI>
+  Operand ToShiftedRightOperand32(LOperand* right,
+                                  LI* shift_info,
+                                  IntegerSignedness signedness);
+
+  int JSShiftAmountFromLConstant(LOperand* constant) {
+    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
+  }
+
   // TODO(jbramley): Examine these helpers and check that they make sense.
   // IsInteger32Constant returns true for smi constants, for example.
   bool IsInteger32Constant(LConstantOperand* op) const;
@@ -137,7 +142,6 @@ class LCodeGen: public LCodeGenBase {
                                    Label* exit,
                                    Label* allocation_entry);

-  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
   void DoDeferredNumberTagU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp1,
@@ -149,6 +153,10 @@ class LCodeGen: public LCodeGenBase {
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);

   Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
@@ -224,7 +232,7 @@ class LCodeGen: public LCodeGenBase {
                   Deoptimizer::BailoutType* override_bailout_type = NULL);
   void Deoptimize(LEnvironment* environment,
                   Deoptimizer::BailoutType* override_bailout_type = NULL);
-  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+  void DeoptimizeIf(Condition cond, LEnvironment* environment);
   void DeoptimizeIfZero(Register rt, LEnvironment* environment);
   void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
   void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
@@ -239,7 +247,6 @@ class LCodeGen: public LCodeGenBase {
   void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
   void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
   void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
-  void ApplyCheckIf(Condition cc, LBoundsCheck* check);
 
   MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                               Register base,
@@ -249,19 +256,19 @@ class LCodeGen: public LCodeGenBase {
                                               int constant_key,
                                               ElementsKind elements_kind,
                                               int additional_index);
-  void CalcKeyedArrayBaseRegister(Register base,
-                                  Register elements,
-                                  Register key,
-                                  bool key_is_tagged,
-                                  ElementsKind elements_kind);
+  MemOperand PrepareKeyedArrayOperand(Register base,
+                                      Register elements,
+                                      Register key,
+                                      bool key_is_tagged,
+                                      ElementsKind elements_kind,
+                                      Representation representation,
+                                      int additional_index);
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
 
-  void Abort(BailoutReason reason);
-
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
   // Emit frame translation commands for an environment.
@@ -368,6 +375,15 @@ class LCodeGen: public LCodeGenBase {
 
   Safepoint::Kind expected_safepoint_kind_;
 
+  // This flag is true when we are after a push (but before a call).
+  // In this situation, jssp no longer references the end of the stack
+  // slots, so we can only reference a stack slot via fp.
+  bool after_push_argument_;
+
+  // If we have inlined arguments, we are no longer able to use jssp because
+  // jssp is modified and we never know if we are in a block after or before
+  // the pop of the arguments (which restores jssp).
+  bool inlined_arguments_;
+
   int old_position_;
 
   class PushSafepointRegistersScope BASE_EMBEDDED {
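Taken together, the two new flags and the StackMode argument added to ToMemOperand above decide which base register may address a stack slot: jssp is usable only while it still points at the end of the stack slots. A self-contained model of that decision (a hypothetical sketch built from the comments above, not the actual ToMemOperand implementation; BaseRegisterFor is an invented helper):

// Hypothetical model of the base-register choice (not V8 code): once an
// argument push has happened, or when inlined arguments may have moved
// jssp, a stack slot must be addressed via fp instead.
#include <cassert>
#include <string>

enum StackMode { kMustUseFramePointer, kCanUseStackPointer };

std::string BaseRegisterFor(StackMode mode,
                            bool after_push_argument,
                            bool inlined_arguments) {
  if (mode == kCanUseStackPointer &&
      !after_push_argument &&
      !inlined_arguments) {
    return "jssp";  // stack pointer still tracks the slot area
  }
  return "fp";      // the frame pointer is valid in every block
}

int main() {
  assert(BaseRegisterFor(kCanUseStackPointer, false, false) == "jssp");
  assert(BaseRegisterFor(kCanUseStackPointer, true, false) == "fp");
  assert(BaseRegisterFor(kMustUseFramePointer, false, false) == "fp");
  return 0;
}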
@@ -387,12 +403,12 @@ class LCodeGen: public LCodeGenBase {
       codegen_->masm_->Mov(to_be_pushed_lr, lr);
       switch (codegen_->expected_safepoint_kind_) {
         case Safepoint::kWithRegisters: {
-          StoreRegistersStateStub stub(kDontSaveFPRegs);
+          StoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
           codegen_->masm_->CallStub(&stub);
           break;
         }
         case Safepoint::kWithRegistersAndDoubles: {
-          StoreRegistersStateStub stub(kSaveFPRegs);
+          StoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
           codegen_->masm_->CallStub(&stub);
           break;
         }
@@ -406,12 +422,12 @@ class LCodeGen: public LCodeGenBase {
       ASSERT((kind & Safepoint::kWithRegisters) != 0);
       switch (kind) {
         case Safepoint::kWithRegisters: {
-          RestoreRegistersStateStub stub(kDontSaveFPRegs);
+          RestoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
           codegen_->masm_->CallStub(&stub);
           break;
         }
         case Safepoint::kWithRegistersAndDoubles: {
-          RestoreRegistersStateStub stub(kSaveFPRegs);
+          RestoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
           codegen_->masm_->CallStub(&stub);
           break;
         }
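Both scopes now thread codegen_->isolate() into the stub constructors, matching the change visible throughout this diff that passes the Isolate to code stubs at construction instead of looking it up later. A miniature of the pattern (hypothetical types, not the real CodeStub hierarchy):

// Hypothetical miniature of the constructor change (not the V8 API):
// the Isolate becomes a required constructor argument, so every stub is
// bound to its isolate the moment it is created.
#include <cassert>

struct Isolate {};

enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

class StoreRegistersStateStub {
 public:
  StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode mode)
      : isolate_(isolate), mode_(mode) {}
  Isolate* isolate() const { return isolate_; }

 private:
  Isolate* isolate_;
  SaveFPRegsMode mode_;
};

int main() {
  Isolate isolate;
  StoreRegistersStateStub stub(&isolate, kDontSaveFPRegs);
  assert(stub.isolate() == &isolate);
  return 0;
}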

27
deps/v8/src/arm64/lithium-gap-resolver-arm64.cc

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include "v8.h"

27
deps/v8/src/arm64/lithium-gap-resolver-arm64.h

@@ -1,29 +1,6 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #ifndef V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
 #define V8_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_

Some files were not shown because too many files changed in this diff
