
deps: update v8 to 3.24.35.17

v0.11.13-release
Fedor Indutny 11 years ago
parent
commit 4d140746f0
  1. deps/v8/AUTHORS | 4
  2. deps/v8/ChangeLog | 44
  3. deps/v8/LICENSE | 2
  4. deps/v8/Makefile | 7
  5. deps/v8/Makefile.android | 60
  6. deps/v8/build/android.gypi | 18
  7. deps/v8/build/features.gypi | 2
  8. deps/v8/build/standalone.gypi | 7
  9. deps/v8/build/toolchain.gypi | 11
  10. deps/v8/include/v8-profiler.h | 6
  11. deps/v8/include/v8.h | 44
  12. deps/v8/src/a64/OWNERS | 1
  13. deps/v8/src/a64/assembler-a64-inl.h | 1200
  14. deps/v8/src/a64/assembler-a64.cc | 2606
  15. deps/v8/src/a64/assembler-a64.h | 2085
  16. deps/v8/src/a64/builtins-a64.cc | 1479
  17. deps/v8/src/a64/code-stubs-a64.cc | 5809
  18. deps/v8/src/a64/code-stubs-a64.h | 469
  19. deps/v8/src/a64/codegen-a64.cc | 616
  20. deps/v8/src/a64/codegen-a64.h | 70
  21. deps/v8/src/a64/constants-a64.h | 1262
  22. deps/v8/src/a64/cpu-a64.cc | 199
  23. deps/v8/src/a64/cpu-a64.h | 107
  24. deps/v8/src/a64/debug-a64.cc | 394
  25. deps/v8/src/a64/debugger-a64.cc | 111
  26. deps/v8/src/a64/debugger-a64.h | 56
  27. deps/v8/src/a64/decoder-a64.cc | 726
  28. deps/v8/src/a64/decoder-a64.h | 202
  29. deps/v8/src/a64/deoptimizer-a64.cc | 376
  30. deps/v8/src/a64/disasm-a64.cc | 1854
  31. deps/v8/src/a64/disasm-a64.h | 115
  32. deps/v8/src/a64/frames-a64.cc | 57
  33. deps/v8/src/a64/frames-a64.h | 131
  34. deps/v8/src/a64/full-codegen-a64.cc | 5010
  35. deps/v8/src/a64/ic-a64.cc | 1413
  36. deps/v8/src/a64/instructions-a64.cc | 334
  37. deps/v8/src/a64/instructions-a64.h | 516
  38. deps/v8/src/a64/instrument-a64.cc | 618
  39. deps/v8/src/a64/instrument-a64.h | 108
  40. deps/v8/src/a64/lithium-a64.cc | 2449
  41. deps/v8/src/a64/lithium-a64.h | 2967
  42. deps/v8/src/a64/lithium-codegen-a64.cc | 5692
  43. deps/v8/src/a64/lithium-codegen-a64.h | 473
  44. deps/v8/src/a64/lithium-gap-resolver-a64.cc | 326
  45. deps/v8/src/a64/lithium-gap-resolver-a64.h | 90
  46. deps/v8/src/a64/macro-assembler-a64-inl.h | 1647
  47. deps/v8/src/a64/macro-assembler-a64.cc | 4975
  48. deps/v8/src/a64/macro-assembler-a64.h | 2238
  49. deps/v8/src/a64/regexp-macro-assembler-a64.cc | 1730
  50. deps/v8/src/a64/regexp-macro-assembler-a64.h | 315
  51. deps/v8/src/a64/simulator-a64.cc | 3414
  52. deps/v8/src/a64/simulator-a64.h | 868
  53. deps/v8/src/a64/stub-cache-a64.cc | 1548
  54. deps/v8/src/a64/utils-a64.cc | 112
  55. deps/v8/src/a64/utils-a64.h | 109
  56. deps/v8/src/allocation-tracker.cc | 1
  57. deps/v8/src/api.cc | 26
  58. deps/v8/src/arm/OWNERS | 1
  59. deps/v8/src/arm/code-stubs-arm.cc | 177
  60. deps/v8/src/arm/debug-arm.cc | 10
  61. deps/v8/src/arm/full-codegen-arm.cc | 167
  62. deps/v8/src/arm/ic-arm.cc | 14
  63. deps/v8/src/arm/lithium-arm.cc | 3
  64. deps/v8/src/arm/lithium-arm.h | 2
  65. deps/v8/src/arm/lithium-codegen-arm.cc | 100
  66. deps/v8/src/arm/macro-assembler-arm.cc | 21
  67. deps/v8/src/arm/simulator-arm.h | 4
  68. deps/v8/src/arm/stub-cache-arm.cc | 63
  69. deps/v8/src/arraybuffer.js | 11
  70. deps/v8/src/assembler.cc | 7
  71. deps/v8/src/assembler.h | 26
  72. deps/v8/src/ast.cc | 35
  73. deps/v8/src/ast.h | 79
  74. deps/v8/src/atomicops.h | 2
  75. deps/v8/src/atomicops_internals_a64_gcc.h | 416
  76. deps/v8/src/bootstrapper.cc | 7
  77. deps/v8/src/builtins.cc | 8
  78. deps/v8/src/char-predicates.h | 21
  79. deps/v8/src/checks.h | 18
  80. deps/v8/src/code-stubs-hydrogen.cc | 89
  81. deps/v8/src/code-stubs.cc | 3
  82. deps/v8/src/code-stubs.h | 59
  83. deps/v8/src/codegen.cc | 2
  84. deps/v8/src/codegen.h | 2
  85. deps/v8/src/compiler.cc | 30
  86. deps/v8/src/compiler.h | 11
  87. deps/v8/src/contexts.h | 2
  88. deps/v8/src/conversions-inl.h | 2
  89. deps/v8/src/dateparser.h | 2
  90. deps/v8/src/debug.cc | 2
  91. deps/v8/src/deoptimizer.cc | 65
  92. deps/v8/src/deoptimizer.h | 20
  93. deps/v8/src/execution.cc | 28
  94. deps/v8/src/execution.h | 9
  95. deps/v8/src/factory.cc | 8
  96. deps/v8/src/factory.h | 4
  97. deps/v8/src/feedback-slots.h | 110
  98. deps/v8/src/flag-definitions.h | 43
  99. deps/v8/src/frames-inl.h | 2
  100. deps/v8/src/frames.h | 4

4
deps/v8/AUTHORS

@@ -17,10 +17,8 @@ Opera Software ASA
 Akinori MUSHA <knu@FreeBSD.org>
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexander Karpinsky <homm86@gmail.com>
-Alexandre Rames <alexandre.rames@arm.com>
 Alexandre Vassalotti <avassalotti@gmail.com>
 Andreas Anyuru <andreas.anyuru@gmail.com>
-Baptiste Afsa <baptiste.afsa@arm.com>
 Bert Belder <bertbelder@gmail.com>
 Burcu Dogan <burcujdogan@gmail.com>
 Craig Schlenter <craig.schlenter@gmail.com>
@@ -33,7 +31,6 @@ Fedor Indutny <fedor@indutny.com>
 Filipe David Manana <fdmanana@gmail.com>
 Haitao Feng <haitao.feng@intel.com>
 Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
-Jacob Bramley <jacob.bramley@arm.com>
 Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 James Pike <g00gle@chilon.net>
@@ -62,7 +59,6 @@ Sandro Santilli <strk@keybit.net>
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
 Tobias Burnus <burnus@net-b.de>
-Vincent Belliard <vincent.belliard@arm.com>
 Vlad Burlik <vladbph@gmail.com>
 Xi Qian <xi.qian@intel.com>
 Yuqiang Xian <yuqiang.xian@intel.com>

44
deps/v8/ChangeLog

@@ -1,47 +1,3 @@
-2014-02-19: Version 3.24.40
-
-        A64: Let the MacroAssembler resolve branches to distant targets (issue
-        3148).
-
-        Fixed and improved code for integral division. Fixed and extended tests
-        (issue 3151).
-
-        MIPS: Fix assignment of function name constant (issue 3138).
-
-        Fix assignment of function name constant (issue 3138).
-
-        Performance and stability improvements on all platforms.
-
-
-2014-02-14: Version 3.24.39
-
-        Introduce --job-based-sweeping flag and use individual jobs for sweeping
-        if set (issue 3104).
-
-        Performance and stability improvements on all platforms.
-
-
-2014-02-13: Version 3.24.38
-
-        Merge experimental/a64 to bleeding_edge (issue 3113).
-
-        Performance and stability improvements on all platforms.
-
-
-2014-02-12: Version 3.24.37
-
-        Fix spec violations in JSON.stringify wrt replacer array (issue 3135).
-
-        Performance and stability improvements on all platforms.
-
-
-2014-02-11: Version 3.24.36
-
-        Fix inconsistencies wrt whitespaces (issue 3109).
-
-        Performance and stability improvements on all platforms.
-
-
 2014-02-10: Version 3.24.35
 
         Fix inconsistencies wrt whitespaces (issue 3109).

2
deps/v8/LICENSE

@ -26,7 +26,7 @@ are:
These libraries have their own licenses; we recommend you read them, These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below. as their terms may differ from the terms below.
Copyright 2014, the V8 project authors. All rights reserved. Copyright 2006-2012, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are
met: met:

7
deps/v8/Makefile

@@ -223,11 +223,11 @@ endif
 
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm a64 mipsel
+ARCHES = ia32 x64 arm mipsel
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_mipsel
 NACL_ARCHES = nacl_ia32 nacl_x64
 
 # List of files that trigger Makefile regeneration:
@@ -359,12 +359,11 @@ native.check: native
 	    --arch-and-mode=. $(TESTFLAGS)
 
 FASTTESTMODES = ia32.release,x64.release,ia32.optdebug,x64.optdebug,arm.optdebug
-FASTCOMPILEMODES = $(FASTTESTMODES),a64.optdebug
 COMMA = ,
 EMPTY =
 SPACE = $(EMPTY) $(EMPTY)
 
-quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
+quickcheck: $(subst $(COMMA),$(SPACE),$(FASTTESTMODES))
 	tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
 qc: quickcheck

60
deps/v8/Makefile.android

@@ -26,7 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_a64 android_mipsel
+ANDROID_ARCHES = android_ia32 android_arm android_mipsel
 MODES = release debug
 
 # Generates all combinations of ANDROID ARCHES and MODES,
@@ -49,40 +49,24 @@ endif
 ifeq ($(ARCH), android_arm)
   DEFINES = target_arch=arm v8_target_arch=arm android_target_arch=arm
   DEFINES += arm_neon=0 arm_version=7
-  TOOLCHAIN_ARCH = arm-linux-androideabi
-  TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-  TOOLCHAIN_VER = 4.6
+  TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
 else
-  ifeq ($(ARCH), android_a64)
-    DEFINES = target_arch=a64 v8_target_arch=a64 android_target_arch=arm64
-    TOOLCHAIN_ARCH = aarch64-linux-android
-    TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-    TOOLCHAIN_VER = 4.8
+  ifeq ($(ARCH), android_mipsel)
+    DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
+    DEFINES += mips_arch_variant=mips32r2
+    TOOLCHAIN_ARCH = mipsel-linux-android-4.6
   else
-    ifeq ($(ARCH), android_mipsel)
-      DEFINES = target_arch=mipsel v8_target_arch=mipsel
-      DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
-      TOOLCHAIN_ARCH = mipsel-linux-android
-      TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-      TOOLCHAIN_VER = 4.6
+    ifeq ($(ARCH), android_ia32)
+      DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+      TOOLCHAIN_ARCH = x86-4.6
     else
-      ifeq ($(ARCH), android_ia32)
-        DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
-        TOOLCHAIN_ARCH = x86
-        TOOLCHAIN_PREFIX = i686-linux-android
-        TOOLCHAIN_VER = 4.6
-      else
-        $(error Target architecture "${ARCH}" is not supported)
-      endif
+      $(error Target architecture "${ARCH}" is not supported)
     endif
   endif
 endif
 
-TOOLCHAIN_PATH = \
-    ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}-${TOOLCHAIN_VER}/prebuilt
+TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
 ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
 ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
   $(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
          check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
@@ -95,23 +79,23 @@ DEFINES += host_os=${HOST_OS}
 .SECONDEXPANSION:
 $(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
 	@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
-	          CXX="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
-	          AR="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ar" \
-	          RANLIB="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ranlib" \
-	          CC="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-gcc" \
-	          LD="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-ld" \
-	          LINK="$(ANDROID_TOOLCHAIN)/bin/${TOOLCHAIN_PREFIX}-g++" \
+	          CXX="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
+	          AR="$(ANDROID_TOOLCHAIN)/bin/*-ar" \
+	          RANLIB="$(ANDROID_TOOLCHAIN)/bin/*-ranlib" \
+	          CC="$(ANDROID_TOOLCHAIN)/bin/*-gcc" \
+	          LD="$(ANDROID_TOOLCHAIN)/bin/*-ld" \
+	          LINK="$(ANDROID_TOOLCHAIN)/bin/*-g++" \
 	          BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
 	                      python -c "print raw_input().capitalize()") \
 	          builddir="$(shell pwd)/$(OUTDIR)/$@"
 
 # Android GYP file generation targets.
 ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
 $(ANDROID_MAKEFILES):
 	GYP_GENERATORS=make-android \
 	GYP_DEFINES="${DEFINES}" \
-	CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
-	CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
+	CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
+	CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
 	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
 	build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \

18
deps/v8/build/android.gypi

@@ -184,11 +184,6 @@
               '-L<(android_stlport_libs)/x86',
             ],
           }],
-          ['target_arch=="a64"', {
-            'ldflags': [
-              '-L<(android_stlport_libs)/arm64',
-            ],
-          }],
         ],
       }],
       ['target_arch=="ia32"', {
@@ -213,19 +208,10 @@
       ],
       'target_conditions': [
         ['_type=="executable"', {
-          'conditions': [
-            ['target_arch=="a64"', {
-              'ldflags': [
-                '-Wl,-dynamic-linker,/system/bin/linker64',
-              ],
-            }, {
-              'ldflags': [
-                '-Wl,-dynamic-linker,/system/bin/linker',
-              ],
-            }]
-          ],
           'ldflags': [
             '-Bdynamic',
+            '-Wl,-dynamic-linker,/system/bin/linker',
+            '-Wl,--gc-sections',
             '-Wl,-z,nocopyreloc',
             # crtbegin_dynamic.o should be the last item in ldflags.
             '<(android_lib)/crtbegin_dynamic.o',

2
deps/v8/build/features.gypi

@@ -115,7 +115,7 @@
     'Release': {
       'variables': {
         'v8_enable_extra_checks%': 0,
-        'v8_enable_handle_zapping%': 1,
+        'v8_enable_handle_zapping%': 0,
       },
       'conditions': [
        ['v8_enable_extra_checks==1', {

7
deps/v8/build/standalone.gypi

@@ -52,11 +52,7 @@
       # to gyp.
       'host_arch%':
         '<!(uname -m | sed -e "s/i.86/ia32/;\
-                               s/x86_64/x64/;\
-                               s/amd64/x64/;\
-                               s/aarch64/a64/;\
-                               s/arm.*/arm/;\
-                               s/mips.*/mipsel/")',
+                               s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")',
     }, {
       # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
       # OS!="netbsd" and OS!="mac"
@@ -101,7 +97,6 @@
     'conditions': [
       ['(v8_target_arch=="arm" and host_arch!="arm") or \
-        (v8_target_arch=="a64" and host_arch!="a64") or \
         (v8_target_arch=="mipsel" and host_arch!="mipsel") or \
         (v8_target_arch=="x64" and host_arch!="x64") or \
         (OS=="android" or OS=="qnx")', {

11
deps/v8/build/toolchain.gypi

@@ -268,11 +268,6 @@
         }], # _toolset=="target"
       ],
     }], # v8_target_arch=="arm"
-    ['v8_target_arch=="a64"', {
-      'defines': [
-        'V8_TARGET_ARCH_A64',
-      ],
-    }],
     ['v8_target_arch=="ia32"', {
       'defines': [
         'V8_TARGET_ARCH_IA32',
@@ -412,8 +407,7 @@
         }],
       ],
     }],
-    ['(OS=="linux" or OS=="android") and \
-      (v8_target_arch=="x64" or v8_target_arch=="a64")', {
+    ['(OS=="linux") and (v8_target_arch=="x64")', {
       # Check whether the host compiler and target compiler support the
       # '-m64' option and set it if so.
       'target_conditions': [
@@ -519,8 +513,7 @@
             OS=="qnx"', {
     'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                 '-Wnon-virtual-dtor', '-Woverloaded-virtual',
-                '<(wno_array_bounds)',
-              ],
+                '<(wno_array_bounds)' ],
     'conditions': [
       ['v8_optimized_debug==0', {
         'cflags!': [

6
deps/v8/include/v8-profiler.h

@@ -257,11 +257,7 @@ class V8_EXPORT HeapGraphNode {
   SnapshotObjectId GetId() const;
 
   /** Returns node's own size, in bytes. */
-  V8_DEPRECATED("Use GetShallowSize instead",
-                int GetSelfSize() const);
-
-  /** Returns node's own size, in bytes. */
-  size_t GetShallowSize() const;
+  int GetSelfSize() const;
 
   /** Returns child nodes count of the node. */
   int GetChildrenCount() const;
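As an aside, GetSelfSize() reports a node's own size in bytes; a minimal sketch of how an embedder might aggregate it over a heap snapshot follows. The helper name and traversal strategy are illustrative, not part of this diff, and the exact HeapProfiler call used to obtain the snapshot varies between V8 releases.

#include <set>
#include "v8-profiler.h"

// Hypothetical helper: sums GetSelfSize() over every distinct node reachable
// from `node`, using GetId() to avoid counting a node twice.
static int SumSelfSizes(const v8::HeapGraphNode* node,
                        std::set<v8::SnapshotObjectId>* seen) {
  if (!seen->insert(node->GetId()).second) return 0;  // Already visited.
  int total = node->GetSelfSize();
  for (int i = 0; i < node->GetChildrenCount(); ++i) {
    total += SumSelfSizes(node->GetChild(i)->GetToNode(), seen);
  }
  return total;
}

// Usage, assuming `snapshot` was obtained from the isolate's HeapProfiler:
//   std::set<v8::SnapshotObjectId> seen;
//   int live_bytes = SumSelfSizes(snapshot->GetRoot(), &seen);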

44
deps/v8/include/v8.h

@@ -4127,12 +4127,13 @@ class V8_EXPORT Isolate {
   /**
    * Enables the host application to receive a notification before a
-   * garbage collection. Allocations are allowed in the callback function,
-   * but the callback is not re-entrant: if the allocation inside it will
-   * trigger the Garbage Collection, the callback won't be called again.
-   * It is possible to specify the GCType filter for your callback. But it is
-   * not possible to register the same callback function two times with
-   * different GCType filters.
+   * garbage collection. Allocations are not allowed in the
+   * callback function, you therefore cannot manipulate objects (set
+   * or delete properties for example) since it is possible such
+   * operations will result in the allocation of objects. It is possible
+   * to specify the GCType filter for your callback. But it is not possible to
+   * register the same callback function two times with different
+   * GCType filters.
    */
   void AddGCPrologueCallback(
       GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
 
@@ -4145,12 +4146,13 @@
   /**
    * Enables the host application to receive a notification after a
-   * garbage collection. Allocations are allowed in the callback function,
-   * but the callback is not re-entrant: if the allocation inside it will
-   * trigger the Garbage Collection, the callback won't be called again.
-   * It is possible to specify the GCType filter for your callback. But it is
-   * not possible to register the same callback function two times with
-   * different GCType filters.
+   * garbage collection. Allocations are not allowed in the
+   * callback function, you therefore cannot manipulate objects (set
+   * or delete properties for example) since it is possible such
+   * operations will result in the allocation of objects. It is possible
+   * to specify the GCType filter for your callback. But it is not possible to
+   * register the same callback function two times with different
+   * GCType filters.
    */
   void AddGCEpilogueCallback(
       GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
 
@@ -4576,22 +4578,6 @@ class V8_EXPORT V8 {
    */
   static void RemoveCallCompletedCallback(CallCompletedCallback callback);
 
-  /**
-   * Experimental: Runs the Microtask Work Queue until empty
-   */
-  static void RunMicrotasks(Isolate* isolate);
-
-  /**
-   * Experimental: Enqueues the callback to the Microtask Work Queue
-   */
-  static void EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask);
-
-  /**
-   * Experimental: Controls whether the Microtask Work Queue is automatically
-   * run when the script call depth decrements to zero.
-   */
-  static void SetAutorunMicrotasks(Isolate *source, bool autorun);
-
   /**
    * Initializes from snapshot if possible. Otherwise, attempts to
    * initialize from scratch. This function is called implicitly if
@@ -5412,7 +5398,7 @@ class Internals {
   static const int kNullValueRootIndex = 7;
   static const int kTrueValueRootIndex = 8;
   static const int kFalseValueRootIndex = 9;
-  static const int kEmptyStringRootIndex = 141;
+  static const int kEmptyStringRootIndex = 147;
 
   static const int kNodeClassIdOffset = 1 * kApiPointerSize;
   static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
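For context, here is a minimal sketch of registering one of these callbacks with a GCType filter, keeping the callback allocation-free as the comment above requires. The callback name is illustrative, and the exact GCPrologueCallback signature differs slightly between V8 releases.

#include <cstdio>
#include "v8.h"

// Hypothetical prologue callback: observes the collection type but performs
// no allocation and touches no V8 objects, per the documented restriction.
static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                         v8::GCCallbackFlags flags) {
  std::fprintf(stderr, "GC starting, type=%d\n", static_cast<int>(type));
}

// Registration, filtered to full mark-sweep/compact collections only:
//   isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
// A given function can be registered with only one GCType filter at a time.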

1
deps/v8/src/a64/OWNERS

@@ -1 +0,0 @@
-rmcilroy@chromium.org

1200
deps/v8/src/a64/assembler-a64-inl.h

File diff suppressed because it is too large

2606
deps/v8/src/a64/assembler-a64.cc

File diff suppressed because it is too large

2085
deps/v8/src/a64/assembler-a64.h

File diff suppressed because it is too large

1479
deps/v8/src/a64/builtins-a64.cc

File diff suppressed because it is too large

5809
deps/v8/src/a64/code-stubs-a64.cc

File diff suppressed because it is too large

469
deps/v8/src/a64/code-stubs-a64.h

@@ -1,469 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CODE_STUBS_A64_H_
#define V8_A64_CODE_STUBS_A64_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() { return StoreBufferOverflow; }
int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic {
public:
// TODO(all): These don't seem to be used any more. Delete them.
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class RecordWriteStub: public PlatformCodeStub {
public:
// Stub to record the write of 'value' at 'address' in 'object'.
// Typically 'address' = 'object' + <some offset>.
// See MacroAssembler::RecordWriteField() for example.
RecordWriteStub(Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
virtual bool SometimesSetsUpAFrame() { return false; }
static Mode GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
Instruction* instr1 =
reinterpret_cast<Instruction*>(stub->instruction_start());
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
ASSERT(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
// We patch the two first instructions of the stub back and forth between an
// adr and branch when we start and stop incremental heap marking.
// The branch is
// b label
// The adr is
// adr xzr label
// so effectively a nop.
static void Patch(Code* stub, Mode mode) {
// We are going to patch the two first instructions of the stub.
PatchingAssembler patcher(
reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
ASSERT(GetMode(stub) == mode);
}
private:
// This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved) {
ASSERT(!AreAliased(scratch, object, address));
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = Register(pool_available.PopLowestIndex());
scratch2_ = Register(pool_available.PopLowestIndex());
// SaveCallerRegisters method needs to save caller saved register, however
// we don't bother saving ip0 and ip1 because they are used as scratch
// registers by the MacroAssembler.
saved_regs_.Remove(ip0);
saved_regs_.Remove(ip1);
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->Push(scratch1_, scratch2_);
}
void Restore(MacroAssembler* masm) {
masm->Pop(scratch2_, scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
masm->PushCPURegList(saved_regs_);
if (mode == kSaveFPRegs) {
masm->PushCPURegList(kCallerSavedFP);
}
}
void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
if (mode == kSaveFPRegs) {
masm->PopCPURegList(kCallerSavedFP);
}
masm->PopCPURegList(saved_regs_);
}
Register object() { return object_; }
Register address() { return address_; }
Register scratch0() { return scratch0_; }
Register scratch1() { return scratch1_; }
Register scratch2() { return scratch2_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
Register scratch2_;
CPURegList saved_regs_;
// TODO(all): We should consider moving this somewhere else.
static CPURegList GetValidRegistersForAllocation() {
// The list of valid registers for allocation is defined as all the
// registers without those with a special meaning.
//
// The default list excludes registers x26 to x31 because they are
// reserved for the following purpose:
// - x26 root register
// - x27 context pointer register
// - x28 jssp
// - x29 frame pointer
// - x30 link register(lr)
// - x31 xzr/stack pointer
CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
// We also remove MacroAssembler's scratch registers.
list.Remove(ip0);
list.Remove(ip1);
list.Remove(x8);
list.Remove(x9);
return list;
}
friend class RecordWriteStub;
};
// A list of stub variants which are pregenerated.
// The variants are stored in the same format as the minor key, so
// MinorKeyFor() can be used to populate and check this list.
static const int kAheadOfTime[];
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
int MinorKey() {
return MinorKeyFor(object_, value_, address_, remembered_set_action_,
save_fp_regs_mode_);
}
static int MinorKeyFor(Register object,
Register value,
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
ASSERT(object.Is64Bits());
ASSERT(value.Is64Bits());
ASSERT(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(action) |
SaveFPRegsModeBits::encode(fp_mode);
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
class ObjectBits: public BitField<int, 0, 5> {};
class ValueBits: public BitField<int, 5, 5> {};
class AddressBits: public BitField<int, 10, 5> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_;
RegisterAllocation regs_;
};
// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() { return NameDictionaryLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
}
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_;
};
class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
// Compares two flat ASCII strings and returns result in x0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Compare two flat ASCII strings for equality and returns result
// in x0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
virtual Major MajorKey() { return StringCompare; }
virtual int MinorKey() { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Label* chars_not_equal);
};
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} } // namespace v8::internal
#endif // V8_A64_CODE_STUBS_A64_H_

616
deps/v8/src/a64/codegen-a64.cc

@@ -1,616 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-a64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#if defined(USE_SIMULATOR)
byte* fast_exp_a64_machine_code = NULL;
double fast_exp_simulator(double x) {
Simulator * simulator = Simulator::current(Isolate::Current());
Simulator::CallArgument args[] = {
Simulator::CallArgument(x),
Simulator::CallArgument::End()
};
return simulator->CallDouble(fast_exp_a64_machine_code, args);
}
#endif
UnaryMathFunction CreateExpFunction() {
if (!FLAG_fast_math) return &std::exp;
// Use the Math.exp implemetation in MathExpGenerator::EmitMathExp() to create
// an AAPCS64-compliant exp() function. This will be faster than the C
// library's exp() function, but probably less accurate.
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &std::exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
masm.SetStackPointer(csp);
// The argument will be in d0 on entry.
DoubleRegister input = d0;
// Use other caller-saved registers for all other values.
DoubleRegister result = d1;
DoubleRegister double_temp1 = d2;
DoubleRegister double_temp2 = d3;
Register temp1 = x10;
Register temp2 = x11;
Register temp3 = x12;
MathExpGenerator::EmitMathExp(&masm, input, result,
double_temp1, double_temp2,
temp1, temp2, temp3);
// Move the result to the return register.
masm.Fmov(d0, result);
masm.Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
fast_exp_a64_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
UnaryMathFunction CreateSqrtFunction() {
return &std::sqrt;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->EnterFrame(StackFrame::INTERNAL);
ASSERT(!masm->has_frame());
masm->set_has_frame(true);
}
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
ASSERT(masm->has_frame());
masm->set_has_frame(false);
}
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- x2 : receiver
// -- x3 : target map
// -----------------------------------
Register receiver = x2;
Register map = x3;
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
__ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
// ----------- S t a t e -------------
// -- lr : return address
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- x3 : target map, scratch for subsequent call
// -----------------------------------
Register receiver = x2;
Register target_map = x3;
Label gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
Register array_size = x6;
Register array = x7;
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
Register map_root = x6;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Add(x10, array, kHeapObjectTag);
__ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
x6, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
__ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
Label entry, done;
__ B(&entry);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(lr);
__ B(fail);
// Iterate over the array, copying and coverting smis to doubles. If an
// element is non-smi, write a hole to the destination.
{
Label loop;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
__ Tst(x13, kSmiTagMask);
__ Fcsel(d0, d0, nan_d, eq);
__ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(lr);
__ Bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
// -- x3 : target map, scratch for subsequent call
// -- x4 : scratch (elements)
// -----------------------------------
Register value = x0;
Register key = x1;
Register receiver = x2;
Register target_map = x3;
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
Register elements = x4;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether it's needed.
__ Push(target_map, receiver, key, value);
Register length = x5;
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Register array_size = x6;
Register array = x7;
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = x6;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize);
__ Add(array, array, kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
Register the_hole = x14;
Register heap_num_map = x15;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;
__ B(&entry);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(value, key, receiver, target_map);
__ Pop(lr);
__ B(fail);
{
Label loop, convert_hole;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ Cmp(x13, kHoleNanInt64);
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = x5;
__ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
__ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
__ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&entry);
// Replace the-hole NaN with the-hole pointer.
__ Bind(&convert_hole);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(value, key, receiver, target_map);
// Replace receiver's backing store with newly created and filled FixedArray.
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(lr);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
bool Code::IsYoungSequence(byte* sequence) {
return MacroAssembler::IsYoungSequence(sequence);
}
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code * stub = GetCodeAgeStub(isolate, age, parity);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
// Handle slices.
Label indirect_string_loaded;
__ Ldrsw(result,
UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
__ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ Add(index, index, result);
__ B(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ Bind(&cons_string);
__ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
// Get the first of the two strings and load its instance type.
__ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ Bind(&indirect_string_loaded);
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ Bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ B(&check_encoding);
// Handle external strings.
__ Bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ Tst(result, kIsIndirectStringMask);
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_CHECK(kShortExternalStringTag != 0);
// TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
// can be bound far away in deferred code.
__ Tst(result, kShortExternalStringMask);
__ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label ascii, done;
__ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
// Two-byte string.
__ Ldrh(result, MemOperand(string, index, LSL, 1));
__ B(&done);
__ Bind(&ascii);
// Ascii string.
__ Ldrb(result, MemOperand(string, index));
__ Bind(&done);
}
static MemOperand ExpConstant(Register base, int index) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_temp1,
DoubleRegister double_temp2,
Register temp1,
Register temp2,
Register temp3) {
// TODO(jbramley): There are several instances where fnmsub could be used
// instead of fmul and fsub. Doing this changes the result, but since this is
// an estimation anyway, does it matter?
ASSERT(!AreAliased(input, result,
double_temp1, double_temp2,
temp1, temp2, temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
DoubleRegister double_temp3 = result;
Register constants = temp3;
// The algorithm used relies on some magic constants which are initialized in
// ExternalReference::InitializeMathExpData().
// Load the address of the start of the array.
__ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));
// We have to do a four-way split here:
// - If input <= about -708.4, the output always rounds to zero.
// - If input >= about 709.8, the output always rounds to +infinity.
// - If the input is NaN, the output is NaN.
// - Otherwise, the result needs to be calculated.
Label result_is_finite_non_zero;
// Assert that we can load offset 0 (the small input threshold) and offset 1
// (the large input threshold) with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
ExpConstant(constants, 0).offset()));
__ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
__ Fcmp(input, double_temp1);
__ Fccmp(input, double_temp2, NoFlag, hi);
// At this point, the condition flags can be in one of five states:
// NZCV
// 1000 -708.4 < input < 709.8 result = exp(input)
// 0110 input == 709.8 result = +infinity
// 0010 input > 709.8 result = +infinity
// 0011 input is NaN result = input
// 0000 input <= -708.4 result = +0.0
// Continue the common case first. 'mi' tests N == 1.
__ B(&result_is_finite_non_zero, mi);
// TODO(jbramley): Add (and use) a zero D register for A64.
// TODO(jbramley): Consider adding a +infinity register for A64.
__ Ldr(double_temp2, ExpConstant(constants, 2)); // Synthesize +infinity.
__ Fsub(double_temp1, double_temp1, double_temp1); // Synthesize +0.0.
// Select between +0.0 and +infinity. 'lo' tests C == 0.
__ Fcsel(result, double_temp1, double_temp2, lo);
// Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
__ Fcsel(result, result, input, vc);
__ B(&done);
// The rest is magic, as described in InitializeMathExpData().
__ Bind(&result_is_finite_non_zero);
// Assert that we can load offset 3 and offset 4 with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
ExpConstant(constants, 3).offset()));
__ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
__ Fmadd(double_temp1, double_temp1, input, double_temp3);
__ Fmov(temp2.W(), double_temp1.S());
__ Fsub(double_temp1, double_temp1, double_temp3);
// Assert that we can load offset 5 and offset 6 with a single ldp.
ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
ExpConstant(constants, 5).offset()));
__ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp1, double_temp1, double_temp2);
__ Fsub(double_temp1, double_temp1, input);
__ Fmul(double_temp2, double_temp1, double_temp1);
__ Fsub(double_temp3, double_temp3, double_temp1);
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));
__ Ldr(double_temp2, ExpConstant(constants, 7));
// TODO(jbramley): Consider using Fnmsub here.
__ Fmul(double_temp3, double_temp3, double_temp2);
__ Fsub(double_temp3, double_temp3, double_temp1);
// The 8th constant is 1.0, so use an immediate move rather than a load.
// We can't generate a runtime assertion here as we would need to call Abort
// in the runtime and we don't have an Isolate when we generate this code.
__ Fmov(double_temp2, 1.0);
__ Fadd(double_temp3, double_temp3, double_temp2);
__ And(temp2, temp2, 0x7ff);
__ Add(temp1, temp1, 0x3ff);
// Do the final table lookup.
__ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
__ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
__ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
__ Bfi(temp2, temp1, 32, 32);
__ Fmov(double_temp1, temp2);
__ Fmul(result, double_temp3, double_temp1);
__ Bind(&done);
}
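// A minimal standalone sketch of the four-way split implemented above,
// assuming the same approximate thresholds; the finite case simply defers to
// std::exp here, whereas the generated code uses the table-based
// approximation set up by ExternalReference::InitializeMathExpData().
#include <cmath>
#include <limits>

static double MathExpSplitSketch(double input) {
  if (std::isnan(input)) return input;                 // NaN in, NaN out.
  if (input <= -708.4) return +0.0;                    // Rounds to +0.0.
  if (input >= 709.8) {
    return std::numeric_limits<double>::infinity();    // Rounds to +infinity.
  }
  return std::exp(input);                              // Finite, non-zero result.
}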
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

70
deps/v8/src/a64/codegen-a64.h

@@ -1,70 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CODEGEN_A64_H_
#define V8_A64_CODEGEN_A64_H_
#include "ast.h"
#include "ic-inl.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} } // namespace v8::internal
#endif // V8_A64_CODEGEN_A64_H_

1262
deps/v8/src/a64/constants-a64.h

File diff suppressed because it is too large

199
deps/v8/src/a64/cpu-a64.cc

@@ -1,199 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for A64 independent of OS goes here.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "a64/cpu-a64.h"
#include "a64/utils-a64.h"
namespace v8 {
namespace internal {
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
// Initialise to the smallest possible cache line size.
unsigned CpuFeatures::dcache_line_size_ = 1;
unsigned CpuFeatures::icache_line_size_ = 1;
void CPU::SetUp() {
CpuFeatures::Probe();
}
bool CPU::SupportsCrankshaft() {
return true;
}
void CPU::FlushICache(void* address, size_t length) {
if (length == 0) {
return;
}
#ifdef USE_SIMULATOR
// TODO(all): consider doing some cache simulation to ensure every address
// run has been synced.
USE(address);
USE(length);
#else
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the code generated is visible to the I
// side of the CPU.
uintptr_t start = reinterpret_cast<uintptr_t>(address);
// Sizes will be used to generate a mask big enough to cover a pointer.
uintptr_t dsize = static_cast<uintptr_t>(CpuFeatures::dcache_line_size());
uintptr_t isize = static_cast<uintptr_t>(CpuFeatures::icache_line_size());
// Cache line sizes are always a power of 2.
ASSERT(CountSetBits(dsize, 64) == 1);
ASSERT(CountSetBits(isize, 64) == 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
__asm__ __volatile__ ( // NOLINT
// Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
// va : by (Virtual) Address
// u : to the point of Unification
// The point of unification for a processor is the point by which the
// instruction and data caches are guaranteed to see the same copy of a
// memory location. See ARM DDI 0406B page B2-12 for more information.
"dc cvau, %[dline] \n\t"
"add %[dline], %[dline], %[dsize] \n\t"
"cmp %[dline], %[end] \n\t"
"b.lt 0b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
// dsb : Data Synchronisation Barrier
// ish : Inner SHareable domain
// The point of unification for an Inner Shareable shareability domain is
// the point by which the instruction and data caches of all the processors
// in that Inner Shareable shareability domain are guaranteed to see the
// same copy of a memory location. See ARM DDI 0406B page B2-12 for more
// information.
"dsb ish \n\t"
// Invalidate every line of the I cache containing the target data.
"1: \n\t"
// ic : instruction cache maintenance
// i : invalidate
// va : by address
// u : to the point of unification
"ic ivau, %[iline] \n\t"
"add %[iline], %[iline], %[isize] \n\t"
"cmp %[iline], %[end] \n\t"
"b.lt 1b \n\t"
// Barrier to make sure the effect of the code above is visible to the rest
// of the world.
"dsb ish \n\t"
// Barrier to ensure any prefetching which happened before this code is
// discarded.
// isb : Instruction Synchronisation Barrier
"isb \n\t"
: [dline] "+r" (dstart),
[iline] "+r" (istart)
: [dsize] "r" (dsize),
[isize] "r" (isize),
[end] "r" (end)
// This code does not write to memory, but without the 'memory' clobber
// gcc might move it before the code being flushed has been written.
: "cc", "memory"
); // NOLINT
#endif
}
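// A minimal standalone sketch of the line-alignment arithmetic used above:
// given a power-of-two line size, the loop start addresses are rounded down
// to a line boundary with 'addr & ~(line_size - 1)'. The names below are
// illustrative only.
#include <cassert>
#include <cstdint>

static uintptr_t AlignDownToLine(uintptr_t addr, uintptr_t line_size) {
  // Power-of-two check: exactly one bit set.
  assert((line_size != 0) && ((line_size & (line_size - 1)) == 0));
  return addr & ~(line_size - 1);
}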
void CpuFeatures::Probe() {
// Compute I and D cache line size. The cache type register holds
// information about the caches.
uint32_t cache_type_register = GetCacheType();
static const int kDCacheLineSizeShift = 16;
static const int kICacheLineSizeShift = 0;
static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
// The cache type register holds the smallest line size of the I and D
// caches as a power of two.
uint32_t dcache_line_size_power_of_two =
(cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
uint32_t icache_line_size_power_of_two =
(cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
dcache_line_size_ = 1 << dcache_line_size_power_of_two;
icache_line_size_ = 1 << icache_line_size_power_of_two;
// AArch64 has no configuration options, no further probing is required.
supported_ = 0;
#ifdef DEBUG
initialized_ = true;
#endif
}
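// A minimal standalone sketch of the field extraction performed in Probe():
// a 4-bit field of the cache type value is masked out, shifted down, and
// interpreted as a power of two. The sample register value below is made up
// purely for illustration.
#include <cstdint>
#include <cstdio>

int main() {
  const int kDCacheLineSizeShift = 16;
  const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
  uint32_t cache_type_register = 0x00040000;  // Hypothetical sample value.
  uint32_t power_of_two =
      (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
  unsigned dcache_line_size = 1u << power_of_two;  // 1 << 4 == 16.
  std::printf("dcache_line_size = %u\n", dcache_line_size);
  return 0;
}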
unsigned CpuFeatures::dcache_line_size() {
ASSERT(initialized_);
return dcache_line_size_;
}
unsigned CpuFeatures::icache_line_size() {
ASSERT(initialized_);
return icache_line_size_;
}
uint32_t CpuFeatures::GetCacheType() {
#ifdef USE_SIMULATOR
// This will lead to a cache with 1 byte long lines, which is fine since the
// simulator will not need this information.
return 0;
#else
uint32_t cache_type_register;
// Copy the content of the cache type register to a core register.
__asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
: [ctr] "=r" (cache_type_register));
return cache_type_register;
#endif
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

107
deps/v8/src/a64/cpu-a64.h

@@ -1,107 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_CPU_A64_H_
#define V8_A64_CPU_A64_H_
#include <stdio.h>
#include "serialize.h"
#include "cpu.h"
namespace v8 {
namespace internal {
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for A64.
return false;
};
static bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
ASSERT(initialized_);
// There are no optional features for A64.
return false;
}
static bool IsSafeForSnapshot(CpuFeature f) {
return (IsSupported(f) &&
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
// I and D cache line size in bytes.
static unsigned dcache_line_size();
static unsigned icache_line_size();
static unsigned supported_;
static bool VerifyCrossCompiling() {
// There are no optional features for A64.
ASSERT(cross_compile_ == 0);
return true;
}
static bool VerifyCrossCompiling(CpuFeature f) {
// There are no optional features for A64.
USE(f);
ASSERT(cross_compile_ == 0);
return true;
}
private:
// Return the content of the cache type register.
static uint32_t GetCacheType();
// I and D cache line size in bytes.
static unsigned icache_line_size_;
static unsigned dcache_line_size_;
#ifdef DEBUG
static bool initialized_;
#endif
// This isn't used (and is always 0), but it is required by V8.
static unsigned found_by_runtime_probing_only_;
static unsigned cross_compile_;
friend class PlatformFeatureScope;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
} } // namespace v8::internal
#endif // V8_A64_CPU_A64_H_

394
deps/v8/src/a64/debug-a64.cc

@@ -1,394 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "codegen.h"
#include "debug.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
#ifdef ENABLE_DEBUGGER_SUPPORT
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return Debug::IsDebugBreakAtReturn(rinfo());
}
void BreakLocationIterator::SetDebugBreakAtReturn() {
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
// the return from JS function sequence from
// mov sp, fp
// ldp fp, lr, [sp] #16
// ldr ip0, [pc, #(3 * kInstructionSize)]
// add sp, sp, ip0
// ret
// <number of parameters ...
// ... plus one (64 bits)>
// to a call to the debug break return code.
// ldr ip0, [pc, #(3 * kInstructionSize)]
// blr ip0
// hlt kHltBadCode @ code should not return, catch if it does.
// <debug break return code ...
// ... entry point address (64 bits)>
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_return()->entry();
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
patcher.LoadLiteral(ip0, 3 * kInstructionSize);
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break return code.
patcher.blr(ip0);
patcher.hlt(kHltBadCode);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
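// A minimal standalone sketch of the size arithmetic behind the
// STATIC_ASSERT above: the patched sequence is three instructions
// (ldr, blr, hlt) followed by a 64-bit literal, i.e. two more 32-bit
// instruction slots, so five slots in total. The constants restate the
// surrounding code and are not new definitions.
#include <cstdint>

namespace patch_layout_sketch {
const int kInstructionSize = 4;     // A64 instructions are 32 bits wide.
const int kPatchedInstructions = 3; // ldr ip0, blr ip0, hlt.
const int kLiteralSlots = sizeof(int64_t) / kInstructionSize;  // Entry address.
static_assert(kPatchedInstructions + kLiteralSlots == 5,
              "patched return sequence occupies five instruction slots");
}  // namespace patch_layout_sketch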
void BreakLocationIterator::ClearDebugBreakAtReturn() {
// Reset the code emitted by EmitReturnSequence to its original state.
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kJSRetSequenceInstructions);
}
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
return rinfo->IsPatchedReturnSequence();
}
bool BreakLocationIterator::IsDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
// Check whether the debug break slot instructions have been patched.
return rinfo()->IsPatchedDebugBreakSlotSequence();
}
void BreakLocationIterator::SetDebugBreakAtSlot() {
// Patch the code emitted by Debug::GenerateSlots, changing the debug break
// slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// mov x0, x0 @ nop DEBUG_BREAK_NOP
// to a call to the debug slot code.
// ldr ip0, [pc, #(2 * kInstructionSize)]
// blr ip0
// <debug break slot code ...
// ... entry point address (64 bits)>
// TODO(all): consider adding a hlt instruction after the blr as we don't
// expect control to return here. This implies increasing
// kDebugBreakSlotInstructions to 5 instructions.
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
byte* entry =
debug_info_->GetIsolate()->debug()->debug_break_slot()->entry();
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// this call site will be registered in the frame (lr being saved as the pc
// of the next instruction to execute for this frame). The debugger can now
// iterate over the frames to find the call to the debug break slot code.
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<int64_t>(entry));
}
void BreakLocationIterator::ClearDebugBreakAtSlot() {
ASSERT(IsDebugBreakSlot());
rinfo()->PatchCode(original_rinfo()->pc(),
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
Register scratch) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Any live values (object_regs and non_object_regs) in caller-saved
// registers (or lr) need to be stored on the stack so that their values are
// safely preserved for a call into C code.
//
// Also:
// * object_regs may be modified during the C code by the garbage
// collector. Every object register must be a valid tagged pointer or
// SMI.
//
// * non_object_regs will be converted to SMIs so that the garbage
// collector doesn't try to interpret them as pointers.
//
// TODO(jbramley): Why can't this handle callee-saved registers?
ASSERT((~kCallerSaved.list() & object_regs) == 0);
ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
ASSERT((object_regs & non_object_regs) == 0);
ASSERT((scratch.Bit() & object_regs) == 0);
ASSERT((scratch.Bit() & non_object_regs) == 0);
ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
STATIC_ASSERT(kSmiValueSize == 32);
CPURegList non_object_list =
CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Store each non-object register as two SMIs.
Register reg = Register(non_object_list.PopLowestIndex());
__ Push(reg);
__ Poke(wzr, 0);
__ Push(reg.W(), wzr);
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
}
if (object_regs != 0) {
__ PushXRegList(object_regs);
}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Mov(x0, 0); // No arguments.
__ Mov(x1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub stub(1);
__ CallStub(&stub);
// Restore the register values from the expression stack.
if (object_regs != 0) {
__ PopXRegList(object_regs);
}
non_object_list =
CPURegList(CPURegister::kRegister, kXRegSize, non_object_regs);
while (!non_object_list.IsEmpty()) {
// Load each non-object register from two SMIs.
// Stack:
// jssp[12]: reg[63:32]
// jssp[8]: 0x00000000 (SMI tag & padding)
// jssp[4]: reg[31:0]
// jssp[0]: 0x00000000 (SMI tag & padding)
Register reg = Register(non_object_list.PopHighestIndex());
__ Pop(scratch, reg);
__ Bfxil(reg, scratch, 32, 32);
}
// Leave the internal frame.
}
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller, which was
// overwritten by the address of DebugBreakXXX.
ExternalReference after_break_target(Debug_Address::AfterBreakTarget(),
masm->isolate());
__ Mov(scratch, Operand(after_break_target));
__ Ldr(scratch, MemOperand(scratch));
__ Br(scratch);
}
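// A minimal standalone sketch of the SMI encoding used above for non-object
// registers, assuming kSmiTag == 0 and kSmiShift == 32: each 32-bit half of
// a 64-bit register is stored shifted into the upper word of its own stack
// slot (so the GC sees two tagged small integers), and the original value is
// rebuilt from the two halves afterwards, much like the Bfxil above.
#include <cassert>
#include <cstdint>

static uint64_t EncodeHalfAsSmi(uint32_t half) {
  return static_cast<uint64_t>(half) << 32;  // Low 32 bits hold the SMI tag (0).
}

static uint64_t RecombineHalves(uint64_t low_smi, uint64_t high_smi) {
  // high_smi already holds reg[63:32] in its upper word; insert reg[31:0].
  return high_smi | (low_smi >> 32);
}

int main() {
  uint64_t reg = 0x0123456789abcdefULL;
  uint64_t low_smi = EncodeHalfAsSmi(static_cast<uint32_t>(reg));
  uint64_t high_smi = EncodeHalfAsSmi(static_cast<uint32_t>(reg >> 32));
  assert(RecombineHalves(low_smi, high_smi) == reg);
  return 0;
}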
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
// -- x2 : name
// -- lr : return address
// -- x0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers x0 and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc).
// ----------- S t a t e -------------
// -- x0 : value
// -- x1 : receiver
// -- x2 : name
// -- lr : return address
// -----------------------------------
// Registers x0, x1, and x2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- x0 : key
// -- x1 : receiver
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
}
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- x0 : value
// -- x1 : key
// -- x2 : receiver
// -- lr : return address
Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
}
void Debug::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
// Register state for CompareNil IC
// ----------- S t a t e -------------
// -- x0 : value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
// -- x2 : name
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x2.Bit(), 0, x10);
}
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that x0 is TOS, which
// is an object. This is not generally the case, so this should be used with
// care.
Generate_DebugBreakCallHelper(masm, x0.Bit(), 0, x10);
}
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), 0, x10);
}
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x1 : function
// -- x2 : feedback array
// -- x3 : slot in feedback array
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit() | x2.Bit() | x3.Bit(), 0, x10);
}
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, x1.Bit(), x0.Bit(), x10);
}
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// Calling convention for CallConstructStub (from code-stubs-a64.cc).
// ----------- S t a t e -------------
// -- x0 : number of arguments (not smi)
// -- x1 : constructor function
// -- x2 : feedback array
// -- x3 : feedback slot (smi)
// -----------------------------------
Generate_DebugBreakCallHelper(
masm, x1.Bit() | x2.Bit() | x3.Bit(), x0.Bit(), x10);
}
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nops to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
InstructionAccurateScope scope(masm, Assembler::kDebugBreakSlotInstructions);
__ RecordDebugBreakSlot();
for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
__ nop(Assembler::DEBUG_BREAK_NOP);
}
}
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
Generate_DebugBreakCallHelper(masm, 0, 0, x10);
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnA64);
}
const bool Debug::kFrameDropperSupported = false;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

111
deps/v8/src/a64/debugger-a64.cc

@@ -1,111 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if V8_TARGET_ARCH_A64
#if defined(USE_SIMULATOR)
#include "a64/debugger-a64.h"
namespace v8 {
namespace internal {
void Debugger::VisitException(Instruction* instr) {
switch (instr->Mask(ExceptionMask)) {
case HLT: {
if (instr->ImmException() == kImmExceptionIsDebug) {
// Read the arguments encoded inline in the instruction stream.
uint32_t code;
uint32_t parameters;
char const * message;
ASSERT(sizeof(*pc_) == 1);
memcpy(&code, pc_ + kDebugCodeOffset, sizeof(code));
memcpy(&parameters, pc_ + kDebugParamsOffset, sizeof(parameters));
message = reinterpret_cast<char const *>(pc_ + kDebugMessageOffset);
if (message[0] == '\0') {
fprintf(stream_, "Debugger hit %" PRIu32 ".\n", code);
} else {
fprintf(stream_, "Debugger hit %" PRIu32 ": %s\n", code, message);
}
// Other options.
switch (parameters & kDebuggerTracingDirectivesMask) {
case TRACE_ENABLE:
set_log_parameters(log_parameters() | parameters);
break;
case TRACE_DISABLE:
set_log_parameters(log_parameters() & ~parameters);
break;
case TRACE_OVERRIDE:
set_log_parameters(parameters);
break;
default:
// We don't support a one-shot LOG_DISASM.
ASSERT((parameters & LOG_DISASM) == 0);
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
if (parameters & LOG_REGS) PrintRegisters(true);
if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
}
// Check if the debugger should break.
if (parameters & BREAK) OS::DebugBreak();
// The stop parameters are inlined in the code. Skip them:
// - Skip to the end of the message string.
pc_ += kDebugMessageOffset + strlen(message) + 1;
// - Advance to the next aligned location.
pc_ = AlignUp(pc_, kInstructionSize);
// - Verify that the unreachable marker is present.
ASSERT(reinterpret_cast<Instruction*>(pc_)->Mask(ExceptionMask) == HLT);
ASSERT(reinterpret_cast<Instruction*>(pc_)->ImmException() ==
kImmExceptionIsUnreachable);
// - Skip past the unreachable marker.
pc_ += kInstructionSize;
pc_modified_ = true;
} else {
Simulator::VisitException(instr);
}
break;
}
default:
UNIMPLEMENTED();
}
}
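// A minimal standalone sketch of the pointer adjustment done above after the
// inline debug message: skip past the NUL-terminated string, then round up
// to the next instruction-size boundary (4 bytes), assuming a power-of-two
// alignment as in AlignUp(). Names here are illustrative only.
#include <cstdint>
#include <cstring>

static const uintptr_t kInstructionSizeSketch = 4;

static const char* SkipMessageAndAlign(const char* message_start) {
  const char* p = message_start + std::strlen(message_start) + 1;  // Past '\0'.
  uintptr_t addr = reinterpret_cast<uintptr_t>(p);
  addr = (addr + kInstructionSizeSketch - 1) & ~(kInstructionSizeSketch - 1);
  return reinterpret_cast<const char*>(addr);
}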
} } // namespace v8::internal
#endif // USE_SIMULATOR
#endif // V8_TARGET_ARCH_A64

56
deps/v8/src/a64/debugger-a64.h

@@ -1,56 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DEBUGGER_A64_H_
#define V8_A64_DEBUGGER_A64_H_
#if defined(USE_SIMULATOR)
#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/simulator-a64.h"
namespace v8 {
namespace internal {
class Debugger : public Simulator {
public:
Debugger(Decoder* decoder, FILE* stream = stderr)
: Simulator(decoder, NULL, stream) {}
// Functions overloading.
void VisitException(Instruction* instr);
};
} } // namespace v8::internal
#endif // USE_SIMULATOR
#endif // V8_A64_DEBUGGER_A64_H_

726
deps/v8/src/a64/decoder-a64.cc

@@ -1,726 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
namespace v8 {
namespace internal {
// Top-level instruction decode function.
void Decoder::Decode(Instruction *instr) {
if (instr->Bits(28, 27) == 0) {
VisitUnallocated(instr);
} else {
switch (instr->Bits(27, 24)) {
// 0: PC relative addressing.
case 0x0: DecodePCRelAddressing(instr); break;
// 1: Add/sub immediate.
case 0x1: DecodeAddSubImmediate(instr); break;
// A: Logical shifted register.
// Add/sub with carry.
// Conditional compare register.
// Conditional compare immediate.
// Conditional select.
// Data processing 1 source.
// Data processing 2 source.
// B: Add/sub shifted register.
// Add/sub extended register.
// Data processing 3 source.
case 0xA:
case 0xB: DecodeDataProcessing(instr); break;
// 2: Logical immediate.
// Move wide immediate.
case 0x2: DecodeLogical(instr); break;
// 3: Bitfield.
// Extract.
case 0x3: DecodeBitfieldExtract(instr); break;
// 4: Unconditional branch immediate.
// Exception generation.
// Compare and branch immediate.
// 5: Compare and branch immediate.
// Conditional branch.
// System.
// 6,7: Unconditional branch.
// Test and branch immediate.
case 0x4:
case 0x5:
case 0x6:
case 0x7: DecodeBranchSystemException(instr); break;
// 8,9: Load/store register pair post-index.
// Load register literal.
// Load/store register unscaled immediate.
// Load/store register immediate post-index.
// Load/store register immediate pre-index.
// Load/store register offset.
// C,D: Load/store register pair offset.
// Load/store register pair pre-index.
// Load/store register unsigned immediate.
// Advanced SIMD.
case 0x8:
case 0x9:
case 0xC:
case 0xD: DecodeLoadStore(instr); break;
// E: FP fixed point conversion.
// FP integer conversion.
// FP data processing 1 source.
// FP compare.
// FP immediate.
// FP data processing 2 source.
// FP conditional compare.
// FP conditional select.
// Advanced SIMD.
// F: FP data processing 3 source.
// Advanced SIMD.
case 0xE:
case 0xF: DecodeFP(instr); break;
}
}
}
void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
// Re-registering a visitor moves it to the back of the list.
visitors_.remove(new_visitor);
visitors_.push_back(new_visitor);
}
void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
// Re-registering a visitor moves it to the front of the list.
visitors_.remove(new_visitor);
visitors_.push_front(new_visitor);
}
void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
visitors_.insert(it, new_visitor);
}
void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
std::list<DecoderVisitor*>::iterator it;
for (it = visitors_.begin(); it != visitors_.end(); it++) {
if (*it == registered_visitor) {
it++;
visitors_.insert(it, new_visitor);
return;
}
}
// We reached the end of the list. The last element must be
// registered_visitor.
ASSERT(*it == registered_visitor);
visitors_.push_back(new_visitor);
}
void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
visitors_.remove(visitor);
}
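// A minimal standalone sketch of the registration semantics implemented
// above: a visitor appears at most once, and registering it again simply
// moves it to the requested position. The string names here stand in for
// DecoderVisitor pointers.
#include <cassert>
#include <list>
#include <string>

int main() {
  std::list<std::string> visitors;
  // Append: remove any existing entry, then add at the back.
  auto append = [&](const std::string& v) {
    visitors.remove(v);
    visitors.push_back(v);
  };
  // Prepend: remove any existing entry, then add at the front.
  auto prepend = [&](const std::string& v) {
    visitors.remove(v);
    visitors.push_front(v);
  };
  append("V1");
  append("V2");
  prepend("V2");  // Moves V2 to the start: V2, V1.
  append("V2");   // Moves V2 to the back again: V1, V2.
  assert(visitors == (std::list<std::string>{"V1", "V2"}));
  return 0;
}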
void Decoder::DecodePCRelAddressing(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x0);
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
ASSERT(instr->Bit(28) == 0x1);
VisitPCRelAddressing(instr);
}
void Decoder::DecodeBranchSystemException(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x4) ||
(instr->Bits(27, 24) == 0x5) ||
(instr->Bits(27, 24) == 0x6) ||
(instr->Bits(27, 24) == 0x7) );
switch (instr->Bits(31, 29)) {
case 0:
case 4: {
VisitUnconditionalBranch(instr);
break;
}
case 1:
case 5: {
if (instr->Bit(25) == 0) {
VisitCompareBranch(instr);
} else {
VisitTestBranch(instr);
}
break;
}
case 2: {
if (instr->Bit(25) == 0) {
if ((instr->Bit(24) == 0x1) ||
(instr->Mask(0x01000010) == 0x00000010)) {
VisitUnallocated(instr);
} else {
VisitConditionalBranch(instr);
}
} else {
VisitUnallocated(instr);
}
break;
}
case 6: {
if (instr->Bit(25) == 0) {
if (instr->Bit(24) == 0) {
if ((instr->Bits(4, 2) != 0) ||
(instr->Mask(0x00E0001D) == 0x00200001) ||
(instr->Mask(0x00E0001D) == 0x00400001) ||
(instr->Mask(0x00E0001E) == 0x00200002) ||
(instr->Mask(0x00E0001E) == 0x00400002) ||
(instr->Mask(0x00E0001C) == 0x00600000) ||
(instr->Mask(0x00E0001C) == 0x00800000) ||
(instr->Mask(0x00E0001F) == 0x00A00000) ||
(instr->Mask(0x00C0001C) == 0x00C00000)) {
VisitUnallocated(instr);
} else {
VisitException(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
if ((instr->Bits(21, 19) == 0x4) ||
(masked_003FF0E0 == 0x00033000) ||
(masked_003FF0E0 == 0x003FF020) ||
(masked_003FF0E0 == 0x003FF060) ||
(masked_003FF0E0 == 0x003FF0E0) ||
(instr->Mask(0x00388000) == 0x00008000) ||
(instr->Mask(0x0038E000) == 0x00000000) ||
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
(instr->Mask(0x003FFFC0) == 0x000320C0) ||
(instr->Mask(0x003FF100) == 0x00032100) ||
(instr->Mask(0x003FF200) == 0x00032200) ||
(instr->Mask(0x003FF400) == 0x00032400) ||
(instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
VisitUnallocated(instr);
} else {
VisitSystem(instr);
}
} else {
VisitUnallocated(instr);
}
}
} else {
if ((instr->Bit(24) == 0x1) ||
(instr->Bits(20, 16) != 0x1F) ||
(instr->Bits(15, 10) != 0) ||
(instr->Bits(4, 0) != 0) ||
(instr->Bits(24, 21) == 0x3) ||
(instr->Bits(24, 22) == 0x3)) {
VisitUnallocated(instr);
} else {
VisitUnconditionalBranchToRegister(instr);
}
}
break;
}
case 3:
case 7: {
VisitUnallocated(instr);
break;
}
}
}
void Decoder::DecodeLoadStore(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0x8) ||
(instr->Bits(27, 24) == 0x9) ||
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) {
// TODO(all): VisitLoadStoreExclusive.
VisitUnimplemented(instr);
} else {
DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePairNonTemporal(instr);
}
} else {
VisitLoadStorePairPostIndex(instr);
}
}
}
} else {
if (instr->Bit(29) == 0) {
if (instr->Mask(0xC4000000) == 0xC4000000) {
VisitUnallocated(instr);
} else {
VisitLoadLiteral(instr);
}
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(21) == 0) {
switch (instr->Bits(11, 10)) {
case 0: {
VisitLoadStoreUnscaledOffset(instr);
break;
}
case 1: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePostIndex(instr);
}
break;
}
case 2: {
// TODO(all): VisitLoadStoreRegisterOffsetUnpriv.
VisitUnimplemented(instr);
break;
}
case 3: {
if (instr->Mask(0xC4C00000) == 0xC0800000) {
VisitUnallocated(instr);
} else {
VisitLoadStorePreIndex(instr);
}
break;
}
}
} else {
if (instr->Bits(11, 10) == 0x2) {
if (instr->Bit(14) == 0) {
VisitUnallocated(instr);
} else {
VisitLoadStoreRegisterOffset(instr);
}
} else {
VisitUnallocated(instr);
}
}
}
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->Bits(31, 30) == 0x3) ||
(instr->Mask(0xC4400000) == 0x40000000)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
VisitLoadStorePairOffset(instr);
} else {
VisitLoadStorePairPreIndex(instr);
}
}
}
} else {
if (instr->Bit(29) == 0) {
VisitUnallocated(instr);
} else {
if ((instr->Mask(0x84C00000) == 0x80C00000) ||
(instr->Mask(0x44800000) == 0x44800000) ||
(instr->Mask(0x84800000) == 0x84800000)) {
VisitUnallocated(instr);
} else {
VisitLoadStoreUnsignedOffset(instr);
}
}
}
}
}
void Decoder::DecodeLogical(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x2);
if (instr->Mask(0x80400000) == 0x00400000) {
VisitUnallocated(instr);
} else {
if (instr->Bit(23) == 0) {
VisitLogicalImmediate(instr);
} else {
if (instr->Bits(30, 29) == 0x1) {
VisitUnallocated(instr);
} else {
VisitMoveWideImmediate(instr);
}
}
}
}
void Decoder::DecodeBitfieldExtract(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x3);
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
(instr->Mask(0x80008000) == 0x00008000)) {
VisitUnallocated(instr);
} else if (instr->Bit(23) == 0) {
if ((instr->Mask(0x80200000) == 0x00200000) ||
(instr->Mask(0x60000000) == 0x60000000)) {
VisitUnallocated(instr);
} else {
VisitBitfield(instr);
}
} else {
if ((instr->Mask(0x60200000) == 0x00200000) ||
(instr->Mask(0x60000000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitExtract(instr);
}
}
}
void Decoder::DecodeAddSubImmediate(Instruction* instr) {
ASSERT(instr->Bits(27, 24) == 0x1);
if (instr->Bit(23) == 1) {
VisitUnallocated(instr);
} else {
VisitAddSubImmediate(instr);
}
}
void Decoder::DecodeDataProcessing(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xA) ||
(instr->Bits(27, 24) == 0xB) );
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Mask(0x80008000) == 0x00008000) {
VisitUnallocated(instr);
} else {
VisitLogicalShifted(instr);
}
} else {
switch (instr->Bits(23, 21)) {
case 0: {
if (instr->Mask(0x0000FC00) != 0) {
VisitUnallocated(instr);
} else {
VisitAddSubWithCarry(instr);
}
break;
}
case 2: {
if ((instr->Bit(29) == 0) ||
(instr->Mask(0x00000410) != 0)) {
VisitUnallocated(instr);
} else {
if (instr->Bit(11) == 0) {
VisitConditionalCompareRegister(instr);
} else {
VisitConditionalCompareImmediate(instr);
}
}
break;
}
case 4: {
if (instr->Mask(0x20000800) != 0x00000000) {
VisitUnallocated(instr);
} else {
VisitConditionalSelect(instr);
}
break;
}
case 6: {
if (instr->Bit(29) == 0x1) {
VisitUnallocated(instr);
} else {
if (instr->Bit(30) == 0) {
if ((instr->Bit(15) == 0x1) ||
(instr->Bits(15, 11) == 0) ||
(instr->Bits(15, 12) == 0x1) ||
(instr->Bits(15, 12) == 0x3) ||
(instr->Bits(15, 13) == 0x3) ||
(instr->Mask(0x8000EC00) == 0x00004C00) ||
(instr->Mask(0x8000E800) == 0x80004000) ||
(instr->Mask(0x8000E400) == 0x80004000)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing2Source(instr);
}
} else {
if ((instr->Bit(13) == 1) ||
(instr->Bits(20, 16) != 0) ||
(instr->Bits(15, 14) != 0) ||
(instr->Mask(0xA01FFC00) == 0x00000C00) ||
(instr->Mask(0x201FF800) == 0x00001800)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing1Source(instr);
}
}
break;
}
}
case 1:
case 3:
case 5:
case 7: VisitUnallocated(instr); break;
}
}
} else {
if (instr->Bit(28) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x80008000) == 0x00008000)) {
VisitUnallocated(instr);
} else {
VisitAddSubShifted(instr);
}
} else {
if ((instr->Mask(0x00C00000) != 0x00000000) ||
(instr->Mask(0x00001400) == 0x00001400) ||
(instr->Mask(0x00001800) == 0x00001800)) {
VisitUnallocated(instr);
} else {
VisitAddSubExtended(instr);
}
}
} else {
if ((instr->Bit(30) == 0x1) ||
(instr->Bits(30, 29) == 0x1) ||
(instr->Mask(0xE0600000) == 0x00200000) ||
(instr->Mask(0xE0608000) == 0x00400000) ||
(instr->Mask(0x60608000) == 0x00408000) ||
(instr->Mask(0x60E00000) == 0x00E00000) ||
(instr->Mask(0x60E00000) == 0x00800000) ||
(instr->Mask(0x60E00000) == 0x00600000)) {
VisitUnallocated(instr);
} else {
VisitDataProcessing3Source(instr);
}
}
}
}
void Decoder::DecodeFP(Instruction* instr) {
ASSERT((instr->Bits(27, 24) == 0xE) ||
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(29) == 1) {
VisitUnallocated(instr);
} else {
if (instr->Bits(31, 30) == 0x3) {
VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
(instr->Bit(18) == 1) ||
(instr->Mask(0x80008000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x00160000) == 0x00000000) ||
(instr->Mask(0x00160000) == 0x00120000)) {
VisitUnallocated(instr);
} else {
VisitFPFixedPointConvert(instr);
}
} else {
if (instr->Bits(15, 10) == 32) {
VisitUnallocated(instr);
} else if (instr->Bits(15, 10) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
(instr->Mask(0x000E0000) == 0x000C0000) ||
(instr->Mask(0x00160000) == 0x00120000) ||
(instr->Mask(0x00160000) == 0x00140000) ||
(instr->Mask(0x20C40000) == 0x00800000) ||
(instr->Mask(0x20C60000) == 0x00840000) ||
(instr->Mask(0xA0C60000) == 0x80060000) ||
(instr->Mask(0xA0C60000) == 0x00860000) ||
(instr->Mask(0xA0C60000) == 0x00460000) ||
(instr->Mask(0xA0CE0000) == 0x80860000) ||
(instr->Mask(0xA0CE0000) == 0x804E0000) ||
(instr->Mask(0xA0CE0000) == 0x000E0000) ||
(instr->Mask(0xA0D60000) == 0x00160000) ||
(instr->Mask(0xA0D60000) == 0x80560000) ||
(instr->Mask(0xA0D60000) == 0x80960000)) {
VisitUnallocated(instr);
} else {
VisitFPIntegerConvert(instr);
}
} else if (instr->Bits(14, 10) == 16) {
const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
if ((instr->Mask(0x80180000) != 0) ||
(masked_A0DF8000 == 0x00020000) ||
(masked_A0DF8000 == 0x00030000) ||
(masked_A0DF8000 == 0x00068000) ||
(masked_A0DF8000 == 0x00428000) ||
(masked_A0DF8000 == 0x00430000) ||
(masked_A0DF8000 == 0x00468000) ||
(instr->Mask(0xA0D80000) == 0x00800000) ||
(instr->Mask(0xA0DE0000) == 0x00C00000) ||
(instr->Mask(0xA0DF0000) == 0x00C30000) ||
(instr->Mask(0xA0DC0000) == 0x00C40000)) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing1Source(instr);
}
} else if (instr->Bits(13, 10) == 8) {
if ((instr->Bits(15, 14) != 0) ||
(instr->Bits(2, 0) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitFPCompare(instr);
}
} else if (instr->Bits(12, 10) == 4) {
if ((instr->Bits(9, 5) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
VisitUnallocated(instr);
} else {
VisitFPImmediate(instr);
}
} else {
if (instr->Mask(0x80800000) != 0x00000000) {
VisitUnallocated(instr);
} else {
switch (instr->Bits(11, 10)) {
case 1: {
VisitFPConditionalCompare(instr);
break;
}
case 2: {
if ((instr->Bits(15, 14) == 0x3) ||
(instr->Mask(0x00009000) == 0x00009000) ||
(instr->Mask(0x0000A000) == 0x0000A000)) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing2Source(instr);
}
break;
}
case 3: {
VisitFPConditionalSelect(instr);
break;
}
default: UNREACHABLE();
}
}
}
}
} else {
// Bit 30 == 1 has been handled earlier.
ASSERT(instr->Bit(30) == 0);
if (instr->Mask(0xA0800000) != 0) {
VisitUnallocated(instr);
} else {
VisitFPDataProcessing3Source(instr);
}
}
}
}
}
}
void Decoder::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
ASSERT(instr->Bits(29, 25) == 0x6);
VisitUnimplemented(instr);
}
void Decoder::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
ASSERT(instr->Bits(27, 25) == 0x7);
VisitUnimplemented(instr);
}
#define DEFINE_VISITOR_CALLERS(A) \
void Decoder::Visit##A(Instruction *instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
ASSERT(instr->Mask(A##FMask) == A##Fixed); \
} \
std::list<DecoderVisitor*>::iterator it; \
for (it = visitors_.begin(); it != visitors_.end(); it++) { \
(*it)->Visit##A(instr); \
} \
}
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
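// A minimal standalone sketch of the X-macro pattern used by VISITOR_LIST
// above: one list macro names every visit hook, and other macros expand it
// into declarations or definitions, so adding an instruction class touches a
// single list. The two-entry list here is illustrative only.
#include <cstdio>

#define SKETCH_VISITOR_LIST(V) \
  V(AddSubImmediate)           \
  V(LoadStorePairOffset)

// Expand the list into one dispatch function per entry.
#define SKETCH_DEFINE(A) \
  static void Visit##A() { std::printf("Visit%s\n", #A); }
SKETCH_VISITOR_LIST(SKETCH_DEFINE)
#undef SKETCH_DEFINE

int main() {
  VisitAddSubImmediate();
  VisitLoadStorePairOffset();
  return 0;
}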
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

202
deps/v8/src/a64/decoder-a64.h

@@ -1,202 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DECODER_A64_H_
#define V8_A64_DECODER_A64_H_
#include <list>
#include "globals.h"
#include "a64/instructions-a64.h"
namespace v8 {
namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
class DecoderVisitor {
public:
#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
#undef DECLARE
virtual ~DecoderVisitor() {}
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
friend class Decoder;
};
class Decoder: public DecoderVisitor {
public:
explicit Decoder() {}
// Top-level instruction decoder function. Decodes an instruction and calls
// the visitor functions registered with the Decoder class.
void Decode(Instruction *instr);
// Register a new visitor class with the decoder.
// Decode() will call the corresponding visitor method from all registered
// visitor classes when decoding reaches the leaf node of the instruction
// decode tree.
// Visitors are called in the order they appear in the list.
// A visitor can only be registered once.
// Registering an already registered visitor will update its position.
//
// d.AppendVisitor(V1);
// d.AppendVisitor(V2);
// d.PrependVisitor(V2); // Move V2 to the start of the list.
// d.InsertVisitorBefore(V3, V2);
// d.AppendVisitor(V4);
// d.AppendVisitor(V4); // No effect.
//
// d.Decode(i);
//
// will call the visitor methods in V3, V2, V1 and V4, in that order.
void AppendVisitor(DecoderVisitor* visitor);
void PrependVisitor(DecoderVisitor* visitor);
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
// Remove a previously registered visitor class from the list of visitors
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
// Decode the PC relative addressing instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x0.
void DecodePCRelAddressing(Instruction* instr);
// Decode the add/subtract immediate instruction, and call the corresponding
// visitors.
// On entry, instruction bits 27:24 = 0x1.
void DecodeAddSubImmediate(Instruction* instr);
// Decode the branch, system command, and exception generation parts of
// the instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
void DecodeBranchSystemException(Instruction* instr);
// Decode the load and store parts of the instruction tree, and call
// the corresponding visitors.
// On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
void DecodeLoadStore(Instruction* instr);
// Decode the logical immediate and move wide immediate parts of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x2.
void DecodeLogical(Instruction* instr);
// Decode the bitfield and extraction parts of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 27:24 = 0x3.
void DecodeBitfieldExtract(Instruction* instr);
// Decode the data processing parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0x1, 0xA, 0xB}.
void DecodeDataProcessing(Instruction* instr);
// Decode the floating point parts of the instruction tree, and call the
// corresponding visitors.
// On entry, instruction bits 27:24 = {0xE, 0xF}.
void DecodeFP(Instruction* instr);
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeAdvSIMDLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
void DecodeAdvSIMDDataProcessing(Instruction* instr);
};
} } // namespace v8::internal
#endif // V8_A64_DECODER_A64_H_

376
deps/v8/src/a64/deoptimizer-a64.cc

@@ -1,376 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
int Deoptimizer::patch_size() {
// Size of the code used to patch lazy bailout points.
// Patching is done by Deoptimizer::DeoptimizeFunction.
return 4 * kInstructionSize;
}
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
patcher.LoadLiteral(ip0, 2 * kInstructionSize);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
ASSERT((prev_call_address == NULL) ||
(call_address >= prev_call_address + patch_size()));
ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
prev_call_address = call_address;
#endif
}
}
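The patch emitted above occupies exactly patch_size() bytes: a literal load, a blr, and an 8-byte inline literal (dc64) holding the deoptimization entry address. A minimal standalone check of that arithmetic, assuming the 4-byte A64 instruction size:

#include <cassert>

int main() {
  const int kInstructionSize = 4;              // A64 instructions are 32 bits wide.
  const int load_literal = kInstructionSize;   // LoadLiteral(ip0, ...)
  const int branch = kInstructionSize;         // blr(ip0)
  const int literal_pool = 8;                  // dc64(deopt_entry) occupies two slots.
  assert(load_literal + branch + literal_pool == 4 * kInstructionSize);
  return 0;
}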
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
for (int i = 0; i < Register::NumRegisters(); i++) {
input_->SetRegister(i, 0);
}
// TODO(all): Do we also need to set a value to csp?
input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
}
}
bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// There is no dynamic alignment padding on A64 in the input frame.
return false;
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler_);
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(x0.code(), params);
output_frame->SetRegister(x1.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
Code* Deoptimizer::NotifyStubFailureBuiltin() {
return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
}
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// TODO(all): This code needs to be revisited. We probably only need to save
// caller-saved registers here. Callee-saved registers can be stored directly
// in the input frame.
// Save all allocatable floating point registers.
CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSize,
0, FPRegister::NumAllocatableRegisters() - 1);
__ PushCPURegList(saved_fp_registers);
// We save all the registers except jssp, sp and lr.
CPURegList saved_registers(CPURegister::kRegister, kXRegSize, 0, 27);
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
const int kSavedRegistersAreaSize =
(saved_registers.Count() * kXRegSizeInBytes) +
(saved_fp_registers.Count() * kDRegSizeInBytes);
// Floating point registers are saved on the stack above core registers.
const int kFPRegistersOffset = saved_registers.Count() * kXRegSizeInBytes;
// Get the bailout id from the stack.
Register bailout_id = x2;
__ Peek(bailout_id, kSavedRegistersAreaSize);
Register code_object = x3;
Register fp_to_sp = x4;
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct it by one word for the bailout id.
__ Add(fp_to_sp, masm()->StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
// - x4: fp-to-sp delta
__ Mov(x5, Operand(ExternalReference::isolate_address(isolate())));
{
// Call Deoptimizer::New().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
}
// Preserve "deoptimizer" object in register x0.
Register deoptimizer = x0;
// Get the input frame descriptor pointer.
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CPURegList copy_to_input = saved_registers;
for (int i = 0; i < saved_registers.Count(); i++) {
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
__ Peek(x2, i * kPointerSize);
CPURegister current_reg = copy_to_input.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Str(x2, MemOperand(x1, offset));
}
// Copy FP registers to the input frame.
for (int i = 0; i < saved_fp_registers.Count(); i++) {
// TODO(all): Look for opportunities to optimize this by using ldp/stp.
int dst_offset = FrameDescription::double_registers_offset() +
(i * kDoubleSize);
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
__ Str(x2, MemOperand(x1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSizeInBytes));
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
__ Add(unwind_limit, unwind_limit, __ StackPointer());
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
Label pop_loop;
Label pop_loop_header;
__ B(&pop_loop_header);
__ Bind(&pop_loop);
__ Pop(x4);
__ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
__ Bind(&pop_loop_header);
__ Cmp(unwind_limit, __ StackPointer());
__ B(ne, &pop_loop);
// Compute the output frame in the deoptimizer.
__ Push(x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
__ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
__ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
__ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
__ B(&outer_loop_header);
__ Bind(&outer_push_loop);
Register current_frame = x2;
__ Ldr(current_frame, MemOperand(x0, 0));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ B(&inner_loop_header);
__ Bind(&inner_push_loop);
__ Sub(x3, x3, kPointerSize);
__ Add(x6, current_frame, x3);
__ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
__ Push(x7);
__ Bind(&inner_loop_header);
__ Cbnz(x3, &inner_push_loop);
__ Add(x0, x0, kPointerSize);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
int src_offset = FrameDescription::double_registers_offset();
while (!saved_fp_registers.IsEmpty()) {
const CPURegister reg = saved_fp_registers.PopLowestIndex();
__ Ldr(reg, MemOperand(x1, src_offset));
src_offset += kDoubleSize;
}
// Push state from the last output frame.
__ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
__ Push(x6);
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
// TODO(all): This code needs to be revisited. We probably don't need to
// restore all the registers as fullcodegen does not keep live values in
// registers (note that at least fp must be restored though).
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
// reloading the other registers.
ASSERT(!saved_registers.IncludesAliasOf(lr));
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
// We don't need to restore x7 as it will be clobbered later to hold the
// continuation address.
Register continuation = x7;
saved_registers.Remove(continuation);
while (!saved_registers.IsEmpty()) {
// TODO(all): Look for opportunities to optimize this by using ldp.
CPURegister current_reg = saved_registers.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Ldr(current_reg, MemOperand(last_output_frame, offset));
}
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
__ InitializeRootRegister();
__ Br(continuation);
}
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label done;
{
InstructionAccurateScope scope(masm());
// The number of entries will never exceed kMaxNumberOfEntries.
// As long as kMaxNumberOfEntries is a valid 16-bit immediate, a movz
// instruction can be used to load the entry id.
ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(masm()->Tmp0(), i);
__ b(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
}
__ Bind(&done);
// TODO(all): We need to add some kind of assertion to verify that Tmp0()
// is not clobbered by Push.
__ Push(masm()->Tmp0());
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
#undef __
} } // namespace v8::internal

1854
deps/v8/src/a64/disasm-a64.cc

File diff suppressed because it is too large

115
deps/v8/src/a64/disasm-a64.h

@ -1,115 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_DISASM_A64_H
#define V8_A64_DISASM_A64_H
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "instructions-a64.h"
#include "decoder-a64.h"
namespace v8 {
namespace internal {
class Disassembler: public DecoderVisitor {
public:
Disassembler();
Disassembler(char* text_buffer, int buffer_size);
virtual ~Disassembler();
char* GetOutput();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
protected:
virtual void ProcessOutput(Instruction* instr);
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);
int SubstituteRegisterField(Instruction* instr, const char* format);
int SubstituteImmediateField(Instruction* instr, const char* format);
int SubstituteLiteralField(Instruction* instr, const char* format);
int SubstituteBitfieldImmediateField(Instruction* instr, const char* format);
int SubstituteShiftField(Instruction* instr, const char* format);
int SubstituteExtendField(Instruction* instr, const char* format);
int SubstituteConditionField(Instruction* instr, const char* format);
int SubstitutePCRelAddressField(Instruction* instr, const char* format);
int SubstituteBranchTargetField(Instruction* instr, const char* format);
int SubstituteLSRegOffsetField(Instruction* instr, const char* format);
int SubstitutePrefetchField(Instruction* instr, const char* format);
int SubstituteBarrierField(Instruction* instr, const char* format);
bool RdIsZROrSP(Instruction* instr) const {
return (instr->Rd() == kZeroRegCode);
}
bool RnIsZROrSP(Instruction* instr) const {
return (instr->Rn() == kZeroRegCode);
}
bool RmIsZROrSP(Instruction* instr) const {
return (instr->Rm() == kZeroRegCode);
}
bool RaIsZROrSP(Instruction* instr) const {
return (instr->Ra() == kZeroRegCode);
}
bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
void ResetOutput();
void AppendToOutput(const char* string, ...);
char* buffer_;
uint32_t buffer_pos_;
uint32_t buffer_size_;
bool own_buffer_;
};
class PrintDisassembler: public Disassembler {
public:
explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
~PrintDisassembler() { }
virtual void ProcessOutput(Instruction* instr);
private:
FILE *stream_;
};
} } // namespace v8::internal
#endif // V8_A64_DISASM_A64_H

57
deps/v8/src/a64/frames-a64.cc

@ -1,57 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#include "assembler.h"
#include "assembler-a64.h"
#include "assembler-a64-inl.h"
#include "frames.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Object*& ExitFrame::constant_pool_slot() const {
UNREACHABLE();
return Memory::Object_at(NULL);
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

131
deps/v8/src/a64/frames-a64.h

@ -1,131 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "a64/constants-a64.h"
#include "a64/assembler-a64.h"
#ifndef V8_A64_FRAMES_A64_H_
#define V8_A64_FRAMES_A64_H_
namespace v8 {
namespace internal {
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
const int kNumJSCallerSaved = 18;
const RegList kJSCallerSaved = 0x3ffff;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of eight.
// TODO(all): Refine this number.
const int kNumSafepointRegisters = 32;
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
#define kSafepointSavedRegisters CPURegList::GetSafepointSavedRegisters().list()
#define kNumSafepointSavedRegisters \
CPURegList::GetSafepointSavedRegisters().Count();
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset =
-(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
};
class ExitFrameConstants : public AllStatic {
public:
static const int kFrameSize = 2 * kPointerSize;
static const int kCallerSPDisplacement = 2 * kPointerSize;
static const int kCallerPCOffset = 1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize; // <- fp
static const int kSPOffset = -1 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kLastExitFrameField = kCodeOffset;
};
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
// There are two words on the stack (saved fp and saved lr) between fp and
// the arguments.
static const int kLastParameterOffset = 2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
};
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
class ConstructFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
static const int kLengthOffset = -4 * kPointerSize;
static const int kConstructorOffset = -5 * kPointerSize;
static const int kImplicitReceiverOffset = -6 * kPointerSize;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
};
class InternalFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
};
inline Object* JavaScriptFrame::function_slot_object() const {
const int offset = JavaScriptFrameConstants::kFunctionOffset;
return Memory::Object_at(fp() + offset);
}
inline void StackHandler::SetFp(Address slot, Address fp) {
Memory::Address_at(slot) = fp;
}
} } // namespace v8::internal
#endif // V8_A64_FRAMES_A64_H_

5010
deps/v8/src/a64/full-codegen-a64.cc

File diff suppressed because it is too large

1413
deps/v8/src/a64/ic-a64.cc

File diff suppressed because it is too large

334
deps/v8/src/a64/instructions-a64.cc

@ -1,334 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if V8_TARGET_ARCH_A64
#define A64_DEFINE_FP_STATICS
#include "a64/instructions-a64.h"
#include "a64/assembler-a64-inl.h"
namespace v8 {
namespace internal {
bool Instruction::IsLoad() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case LDRB_w:
case LDRH_w:
case LDR_w:
case LDR_x:
case LDRSB_w:
case LDRSB_x:
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_s:
case LDR_d: return true;
default: return false;
}
}
}
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
}
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_s:
case STR_d: return true;
default: return false;
}
}
}
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
ASSERT(width <= 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
}
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
uint64_t value,
unsigned width) {
ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
uint64_t result = value & ((1UL << width) - 1UL);
for (unsigned i = width; i < reg_size; i *= 2) {
result |= (result << i);
}
return result;
}
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
int64_t n = BitN();
int64_t imm_s = ImmSetBits();
int64_t imm_r = ImmRotate();
// An integer is constructed from the n, imm_s and imm_r bits according to
// the following table:
//
// N imms immr size S R
// 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
// 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
// 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
// 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
// 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
// 0 11110s xxxxxr 2 UInt(s) UInt(r)
// (s bits must not be all set)
//
// A pattern is constructed of size bits, where the least significant S+1
// bits are set. The pattern is rotated right by R, and repeated across a
// 32 or 64-bit value, depending on destination register width.
//
if (n == 1) {
if (imm_s == 0x3F) {
return 0;
}
uint64_t bits = (1UL << (imm_s + 1)) - 1;
return RotateRight(bits, imm_r, 64);
} else {
if ((imm_s >> 1) == 0x1F) {
return 0;
}
for (int width = 0x20; width >= 0x2; width >>= 1) {
if ((imm_s & width) == 0) {
int mask = width - 1;
if ((imm_s & mask) == mask) {
return 0;
}
uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
return RepeatBitsAcrossReg(reg_size,
RotateRight(bits, imm_r & mask, width),
width);
}
}
}
UNREACHABLE();
return 0;
}
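As a hedged worked example of the decoding rule in the table above (standalone code, not part of this file): with N=0, imms=0b000011 and immr=0 targeting a 64-bit register, the element width is 32, S=3 gives the pattern 0b1111, and repeating it across the register yields 0x0000000F0000000F:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned imm_s = 0x03;                 // 0b000011 -> S = 3.
  uint64_t bits = (1ULL << (imm_s + 1)) - 1;   // S+1 consecutive set bits: 0xF.
  // immr = 0, so the rotation is a no-op; repeat the 32-bit element twice.
  uint64_t value = 0;
  for (int i = 0; i < 64; i += 32) {
    value |= bits << i;
  }
  assert(value == 0x0000000F0000000FULL);
  return 0;
}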
float Instruction::ImmFP32() {
// ImmFP: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return rawbits_to_float(result);
}
double Instruction::ImmFP64() {
// ImmFP: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return rawbits_to_double(result);
}
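A hedged worked example of the ImmFP32 layout above (standalone, using the same bit arithmetic as the function): the 8-bit immediate 0x70 (abcdefgh = 01110000) expands to the single-precision pattern 0x3F800000, i.e. 1.0f:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t imm8 = 0x70;
  uint32_t bit7 = (imm8 >> 7) & 0x1;           // a
  uint32_t bit6 = (imm8 >> 6) & 0x1;           // b
  uint32_t bit5_to_0 = imm8 & 0x3f;            // cdefgh
  uint32_t raw = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
  float value;
  std::memcpy(&value, &raw, sizeof(value));
  assert(raw == 0x3F800000u);
  assert(value == 1.0f);
  return 0;
}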
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
switch (op) {
case STP_x:
case LDP_x:
case STP_d:
case LDP_d: return LSDoubleWord;
default: return LSWord;
}
}
ptrdiff_t Instruction::ImmPCOffset() {
ptrdiff_t offset;
if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported.
offset = ImmPCRel();
} else if (BranchType() != UnknownBranchType) {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
ASSERT(IsLdrLiteral());
// The offset is always shifted by 2 bits, even for loads to 64-bit
// registers.
offset = ImmLLiteral() << kInstructionSizeLog2;
}
return offset;
}
Instruction* Instruction::ImmPCOffsetTarget() {
return this + ImmPCOffset();
}
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
int32_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
int offset = target - this;
return IsValidImmPCOffset(BranchType(), offset);
}
void Instruction::SetImmPCOffsetTarget(Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else {
SetImmLLiteral(target);
}
}
void Instruction::SetPCRelImmTarget(Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
ASSERT(Mask(PCRelAddressingMask) == ADR);
Instr imm = Assembler::ImmPCRelAddress(target - this);
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}
void Instruction::SetBranchImmTarget(Instruction* target) {
ASSERT(((target - this) & 3) == 0);
Instr branch_imm = 0;
uint32_t imm_mask = 0;
int offset = (target - this) >> kInstructionSizeLog2;
switch (BranchType()) {
case CondBranchType: {
branch_imm = Assembler::ImmCondBranch(offset);
imm_mask = ImmCondBranch_mask;
break;
}
case UncondBranchType: {
branch_imm = Assembler::ImmUncondBranch(offset);
imm_mask = ImmUncondBranch_mask;
break;
}
case CompareBranchType: {
branch_imm = Assembler::ImmCmpBranch(offset);
imm_mask = ImmCmpBranch_mask;
break;
}
case TestBranchType: {
branch_imm = Assembler::ImmTestBranch(offset);
imm_mask = ImmTestBranch_mask;
break;
}
default: UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
void Instruction::SetImmLLiteral(Instruction* source) {
ASSERT(((source - this) & 3) == 0);
int offset = (source - this) >> kLiteralEntrySizeLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
SetInstructionBits(Mask(~mask) | imm);
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
// Inline data is encoded as a single movz instruction which writes to xzr
// (x31).
return IsMovz() && SixtyFourBits() && (Rd() == xzr.code());
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
}
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-a64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
ASSERT(IsInlineData());
uint64_t payload = ImmMoveWide();
// TODO(all): If we extend ::InlineData() to support bigger data, we need
// to update this method too.
return payload;
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

516
deps/v8/src/a64/instructions-a64.h

@ -1,516 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_INSTRUCTIONS_A64_H_
#define V8_A64_INSTRUCTIONS_A64_H_
#include "globals.h"
#include "utils.h"
#include "a64/constants-a64.h"
#include "a64/utils-a64.h"
namespace v8 {
namespace internal {
// ISA constants. --------------------------------------------------------------
typedef uint32_t Instr;
// The following macros initialize a float/double variable with a bit pattern
// without using static initializers: If A64_DEFINE_FP_STATICS is defined, the
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(A64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(A64_DEFINE_FP_STATICS)
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
DEFINE_DOUBLE(kFP64NegativeInfinity, 0xfff0000000000000UL);
// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
DEFINE_DOUBLE(kFP64SignallingNaN, 0x7ff000007f800001);
DEFINE_FLOAT(kFP32SignallingNaN, 0x7f800001);
// A similar value, but as a quiet NaN.
DEFINE_DOUBLE(kFP64QuietNaN, 0x7ff800007fc00001);
DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
LSWord = 2,
LSDoubleWord = 3
};
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
CondBranchType = 1,
UncondBranchType = 2,
CompareBranchType = 3,
TestBranchType = 4
};
enum AddrMode {
Offset,
PreIndex,
PostIndex
};
enum FPRounding {
// The first four values are encodable directly by FPCR<RMode>.
FPTieEven = 0x0,
FPPositiveInfinity = 0x1,
FPNegativeInfinity = 0x2,
FPZero = 0x3,
// The final rounding mode is only available when explicitly specified by the
// instruction (such as with fcvta). It cannot be set in FPCR.
FPTieAway
};
enum Reg31Mode {
Reg31IsStackPointer,
Reg31IsZeroRegister
};
// Instructions. ---------------------------------------------------------------
class Instruction {
public:
Instr InstructionBits() const {
Instr bits;
memcpy(&bits, this, sizeof(bits));
return bits;
}
void SetInstructionBits(Instr new_instr) {
memcpy(this, &new_instr, sizeof(new_instr));
}
int Bit(int pos) const {
return (InstructionBits() >> pos) & 1;
}
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
}
int32_t SignedBits(int msb, int lsb) const {
int32_t bits = *(reinterpret_cast<const int32_t*>(this));
return signed_bitextract_32(msb, lsb, bits);
}
Instr Mask(uint32_t mask) const {
return InstructionBits() & mask;
}
Instruction* following(int count = 1) {
return this + count * kInstructionSize;
}
Instruction* preceding(int count = 1) {
return this - count * kInstructionSize;
}
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int64_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
int ImmPCRel() const {
int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
int const width = ImmPCRelLo_width + ImmPCRelHi_width;
return signed_bitextract_32(width-1, 0, offset);
}
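A small hedged sketch of the compound-field decode above (standalone; the 2-bit ImmPCRelLo and 19-bit ImmPCRelHi widths are the usual A64 ADR field sizes, assumed here): concatenating the two raw fields and sign-extending over 21 bits turns an all-ones encoding into -1:

#include <cassert>
#include <cstdint>

int main() {
  const int kLoWidth = 2, kHiWidth = 19;       // assumed ADR field widths.
  uint32_t lo = 0x3;                           // both low bits set.
  uint32_t hi = 0x7FFFF;                       // all nineteen high bits set.
  int width = kLoWidth + kHiWidth;             // 21 significant bits.
  uint32_t raw = (hi << kLoWidth) | lo;
  // Sign-extend the 21-bit value, as signed_bitextract_32 does here
  // (relies on the usual arithmetic right shift of signed values).
  int32_t offset = static_cast<int32_t>(raw << (32 - width)) >> (32 - width);
  assert(offset == -1);
  return 0;
}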
uint64_t ImmLogical();
float ImmFP32();
double ImmFP64();
LSDataSize SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
}
bool IsUncondBranchImm() const {
return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
}
bool IsCompareBranch() const {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
bool IsTestBranch() const {
return Mask(TestBranchFMask) == TestBranchFixed;
}
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
bool IsLdrLiteralX() const {
return Mask(LoadLiteralMask) == LDR_x_lit;
}
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
bool IsAddSubImmediate() const {
return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
}
bool IsAddSubExtended() const {
return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
}
// Match any loads or stores, including pairs.
bool IsLoadOrStore() const {
return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
}
// Match any loads, including pairs.
bool IsLoad() const;
// Match any stores, including pairs.
bool IsStore() const;
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
// The following instructions use csp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
// Otherwise, r31 is the zero register.
if (IsAddSubImmediate() || IsAddSubExtended()) {
if (Mask(AddSubSetFlagsBit)) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into csp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
return Reg31IsZeroRegister;
} else {
return Reg31IsStackPointer;
}
}
return Reg31IsZeroRegister;
}
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
// The following instructions use csp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).
// Otherwise, r31 is the zero register.
if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
return Reg31IsStackPointer;
}
return Reg31IsZeroRegister;
}
ImmBranchType BranchType() const {
if (IsCondBranchImm()) {
return CondBranchType;
} else if (IsUncondBranchImm()) {
return UncondBranchType;
} else if (IsCompareBranch()) {
return CompareBranchType;
} else if (IsTestBranch()) {
return TestBranchType;
} else {
return UnknownBranchType;
}
}
static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
switch (branch_type) {
case UncondBranchType:
return ImmUncondBranch_width;
case CondBranchType:
return ImmCondBranch_width;
case CompareBranchType:
return ImmCmpBranch_width;
case TestBranchType:
return ImmTestBranch_width;
default:
UNREACHABLE();
return 0;
}
}
// The range of the branch instruction, expressed as 'instr +- range'.
static int32_t ImmBranchRange(ImmBranchType branch_type) {
return
(1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
kInstructionSize;
}
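Plugging the usual A64 field widths into the range formula above (a hedged worked example, not part of the header): a conditional branch has a 19-bit immediate and 4-byte instructions, so it can reach (1 << (19 + 2)) / 2 - 4 = 1048572 bytes, roughly +- 1 MB, in either direction:

#include <cassert>

int main() {
  const int kImmCondBranchWidth = 19;   // imm19, assumed per the A64 encoding.
  const int kInstructionSizeLog2 = 2;   // 4-byte instructions.
  const int kInstructionSize = 4;
  int range = (1 << (kImmCondBranchWidth + kInstructionSizeLog2)) / 2 -
              kInstructionSize;
  assert(range == 1048572);             // 1 MB minus one instruction.
  return 0;
}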
int ImmBranch() const {
switch (BranchType()) {
case CondBranchType: return ImmCondBranch();
case UncondBranchType: return ImmUncondBranch();
case CompareBranchType: return ImmCmpBranch();
case TestBranchType: return ImmTestBranch();
default: UNREACHABLE();
}
return 0;
}
bool IsBranchAndLinkToRegister() const {
return Mask(UnconditionalBranchToRegisterMask) == BLR;
}
bool IsMovz() const {
return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
(Mask(MoveWideImmediateMask) == MOVZ_w);
}
bool IsMovk() const {
return (Mask(MoveWideImmediateMask) == MOVK_x) ||
(Mask(MoveWideImmediateMask) == MOVK_w);
}
bool IsMovn() const {
return (Mask(MoveWideImmediateMask) == MOVN_x) ||
(Mask(MoveWideImmediateMask) == MOVN_w);
}
bool IsNop(int n) {
// A marking nop is an instruction
// mov r<n>, r<n>
// which is encoded as
// orr r<n>, xzr, r<n>
return (Mask(LogicalShiftedMask) == ORR_x) &&
(Rd() == Rm()) &&
(Rd() == n);
}
// Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction.
// The offset returned is unscaled.
ptrdiff_t ImmPCOffset();
// Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
uint8_t* LiteralAddress() {
int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
return reinterpret_cast<uint8_t*>(this) + offset;
}
uint32_t Literal32() {
uint32_t literal;
memcpy(&literal, LiteralAddress(), sizeof(literal));
return literal;
}
uint64_t Literal64() {
uint64_t literal;
memcpy(&literal, LiteralAddress(), sizeof(literal));
return literal;
}
float LiteralFP32() {
return rawbits_to_float(Literal32());
}
double LiteralFP64() {
return rawbits_to_double(Literal64());
}
Instruction* NextInstruction() {
return this + kInstructionSize;
}
Instruction* InstructionAtOffset(int64_t offset) {
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(this) + offset,
kInstructionSize));
return this + offset;
}
template<typename T> static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
void SetPCRelImmTarget(Instruction* target);
void SetBranchImmTarget(Instruction* target);
};
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
// MacroAssembler.
class InstructionSequence : public Instruction {
public:
static InstructionSequence* At(Address address) {
return reinterpret_cast<InstructionSequence*>(address);
}
// Sequences generated by MacroAssembler::InlineData().
bool IsInlineData() const;
uint64_t InlineData() const;
};
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
// marker encodes arguments in a different way, as described below.
// Indicate to the Debugger that the instruction is a redirected call.
const Instr kImmExceptionIsRedirectedCall = 0xca11;
// Represent unreachable code. This is used as a guard in parts of the code that
// should not be reachable, such as in data encoded inline in the instructions.
const Instr kImmExceptionIsUnreachable = 0xdebf;
// A pseudo 'printf' instruction. The arguments will be passed to the platform
// printf method.
const Instr kImmExceptionIsPrintf = 0xdeb1;
// Parameters are stored in A64 registers as if the printf pseudo-instruction
// was a call to the real printf method:
//
// x0: The format string, then either of:
// x1-x7: Optional arguments.
// d0-d7: Optional arguments.
//
// Floating-point and integer arguments are passed in separate sets of
// registers in AAPCS64 (even for varargs functions), so it is not possible to
// determine the type or location of each argument without some information
// about the values that were passed in. This information could be retrieved
// from the printf format string, but the format string is not trivial to
// parse so we encode the relevant information with the HLT instruction.
// - Type
// Either kRegister or kFPRegister, but stored as a uint32_t because there's
// no way to guarantee the size of the CPURegister::RegisterType enum.
const unsigned kPrintfTypeOffset = 1 * kInstructionSize;
const unsigned kPrintfLength = 2 * kInstructionSize;
// A pseudo 'debug' instruction.
const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a NULL-terminated ASCII string, padded to
// kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
const unsigned kDebugCodeOffset = 1 * kInstructionSize;
const unsigned kDebugParamsOffset = 2 * kInstructionSize;
const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// Debug parameters.
// Used without a TRACE_ option, the Debugger will print the arguments only
// once. Otherwise TRACE_ENABLE and TRACE_DISABLE will enable or disable tracing
// before every instruction for the specified LOG_ parameters.
//
// TRACE_OVERRIDE enables the specified LOG_ parameters, and disables any
// others that were not specified.
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
// starts disassembling the code.
//
// __ debug("trace rets", 2, TRACE_ENABLE | LOG_REGS);
// adds the general purpose registers to the trace.
//
// __ debug("stop regs", 3, TRACE_DISABLE | LOG_REGS);
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_FP_REGS = 1 << 3, // Log floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
// Trace control.
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_OVERRIDE = 3 << 6
};
} } // namespace v8::internal
#endif // V8_A64_INSTRUCTIONS_A64_H_

618
deps/v8/src/a64/instrument-a64.cc

@ -1,618 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "a64/instrument-a64.h"
namespace v8 {
namespace internal {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
ASSERT(name != NULL);
strncpy(name_, name, kCounterNameMaxLength);
}
void Counter::Enable() {
enabled_ = true;
}
void Counter::Disable() {
enabled_ = false;
}
bool Counter::IsEnabled() {
return enabled_;
}
void Counter::Increment() {
if (enabled_) {
count_++;
}
}
uint64_t Counter::count() {
uint64_t result = count_;
if (type_ == Gauge) {
// If the counter is a Gauge, reset the count after reading.
count_ = 0;
}
return result;
}
const char* Counter::name() {
return name_;
}
CounterType Counter::type() {
return type_;
}
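The Gauge/Cumulative distinction implemented in Counter::count() above can be summarised with a small standalone model (an assumed simplification, not the class in this file): a Gauge resets to zero each time it is sampled, while a Cumulative counter keeps its running total:

#include <cassert>
#include <cstdint>

enum SimpleCounterType { kGauge, kCumulative };

struct SimpleCounter {
  explicit SimpleCounter(SimpleCounterType type) : count_(0), type_(type) {}
  void Increment() { count_++; }
  uint64_t count() {
    uint64_t result = count_;
    if (type_ == kGauge) count_ = 0;    // Gauges reset when read.
    return result;
  }
  uint64_t count_;
  SimpleCounterType type_;
};

int main() {
  SimpleCounter gauge(kGauge);
  SimpleCounter total(kCumulative);
  gauge.Increment();
  total.Increment();
  assert(gauge.count() == 1 && gauge.count() == 0);
  assert(total.count() == 1 && total.count() == 1);
  return 0;
}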
typedef struct {
const char* name;
CounterType type;
} CounterDescriptor;
static const CounterDescriptor kCounterList[] = {
{"Instruction", Cumulative},
{"Move Immediate", Gauge},
{"Add/Sub DP", Gauge},
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},
{"Unconditional Branch", Gauge},
{"Compare and Branch", Gauge},
{"Test and Branch", Gauge},
{"Conditional Branch", Gauge},
{"Load Integer", Gauge},
{"Load FP", Gauge},
{"Load Pair", Gauge},
{"Load Literal", Gauge},
{"Store Integer", Gauge},
{"Store FP", Gauge},
{"Store Pair", Gauge},
{"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {
// Set up the output stream. If datafile is non-NULL, use that file. If it
// can't be opened, or datafile is NULL, use stderr.
if (datafile != NULL) {
output_stream_ = fopen(datafile, "w");
if (output_stream_ == NULL) {
fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
output_stream_ = stderr;
}
}
static const int num_counters =
sizeof(kCounterList) / sizeof(CounterDescriptor);
// Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters);
fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
// Construct Counter objects from counter description array.
for (int i = 0; i < num_counters; i++) {
Counter* counter = new Counter(kCounterList[i].name, kCounterList[i].type);
counters_.push_back(counter);
}
DumpCounterNames();
}
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
// Free all the counter objects.
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
delete *it;
}
if (output_stream_ != stderr) {
fclose(output_stream_);
}
}
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
static Counter* counter = GetCounter("Instruction");
ASSERT(counter->type() == Cumulative);
counter->Increment();
if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
DumpCounters();
}
}
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%" PRIu64 ",", (*it)->count());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
fprintf(output_stream_, "%s,", (*it)->name());
}
fprintf(output_stream_, "\n");
fflush(output_stream_);
}
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
case InstrumentStateEnable: Enable(); break;
case InstrumentStateDisable: Disable(); break;
default: DumpEventMarker(event);
}
}
void Instrument::DumpEventMarker(unsigned marker) {
// Dump an event marker to the output stream as a specially formatted comment
// line.
static Counter* counter = GetCounter("Instruction");
fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
(marker >> 8) & 0xff, counter->count());
}
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
if (strcmp((*it)->name(), name) == 0) {
return *it;
}
}
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
"# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Enable();
}
}
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
(*it)->Disable();
}
}
void Instrument::VisitPCRelAddressing(Instruction* instr) {
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
}
void Instrument::VisitLogicalImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitMoveWideImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
unsigned imm = instr->ImmMoveWide();
HandleInstrumentationEvent(imm);
} else {
counter->Increment();
}
}
void Instrument::VisitBitfield(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitExtract(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitUnconditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
void Instrument::VisitCompareBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
void Instrument::VisitTestBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
void Instrument::VisitConditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
void Instrument::VisitSystem(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitException(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::InstrumentLoadStorePair(Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
if (instr->Mask(LoadStorePairLBit) != 0) {
load_pair_counter->Increment();
} else {
store_pair_counter->Increment();
}
}
void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreOpMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
case STR_x: store_int_counter->Increment(); break;
case STR_s: // Fall through.
case STR_d: store_fp_counter->Increment(); break;
case LDRB_w: // Fall through.
case LDRH_w: // Fall through.
case LDR_w: // Fall through.
case LDR_x: // Fall through.
case LDRSB_x: // Fall through.
case LDRSH_x: // Fall through.
case LDRSW_x: // Fall through.
case LDRSB_w: // Fall through.
case LDRSH_w: load_int_counter->Increment(); break;
case LDR_s: // Fall through.
case LDR_d: load_fp_counter->Increment(); break;
default: UNREACHABLE();
}
}
void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
void Instrument::VisitLogicalShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
void Instrument::VisitAddSubShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
}
void Instrument::VisitAddSubWithCarry(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
void Instrument::VisitFPCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPConditionalCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
void Instrument::VisitFPConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
void Instrument::VisitFPImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPIntegerConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
void Instrument::VisitUnallocated(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
void Instrument::VisitUnimplemented(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
} } // namespace v8::internal
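The visitors above only do anything once an Instrument instance is registered with the instruction decoder. The minimal sketch below shows that wiring; it is not part of this diff, and it assumes Decoder is default-constructible with AppendVisitor()/Decode() as declared in a64/decoder-a64.h, that kInstructionSize comes from a64/constants-a64.h, and that counters are flushed to the file as the sample period elapses and when the Instrument is destroyed.
#include "a64/decoder-a64.h"
#include "a64/instrument-a64.h"
using namespace v8::internal;
// Decode a range of generated code and let Instrument tally it by class.
static void CountInstructions(Instruction* start, Instruction* end) {
  Decoder decoder;
  Instrument instrument("a64_stats.csv");  // default 2^22 sample period
  decoder.AppendVisitor(&instrument);
  for (Instruction* pc = start; pc < end;
       pc = reinterpret_cast<Instruction*>(
           reinterpret_cast<byte*>(pc) + kInstructionSize)) {
    decoder.Decode(pc);  // Dispatches to one of the Visit* methods above.
  }
}  // The Instrument going out of scope closes the output stream.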

108
deps/v8/src/a64/instrument-a64.h

@@ -1,108 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_INSTRUMENT_A64_H_
#define V8_A64_INSTRUMENT_A64_H_
#include "globals.h"
#include "utils.h"
#include "a64/decoder-a64.h"
#include "a64/constants-a64.h"
#include "a64/instrument-a64.h"
namespace v8 {
namespace internal {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
enum InstrumentState {
InstrumentStateDisable = 0,
InstrumentStateEnable = 1
};
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
class Counter {
public:
Counter(const char* name, CounterType type = Gauge);
void Increment();
void Enable();
void Disable();
bool IsEnabled();
uint64_t count();
const char* name();
CounterType type();
private:
char name_[kCounterNameMaxLength];
uint64_t count_;
bool enabled_;
CounterType type_;
};
class Instrument: public DecoderVisitor {
public:
explicit Instrument(const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
private:
void Update();
void Enable();
void Disable();
void DumpCounters();
void DumpCounterNames();
void DumpEventMarker(unsigned marker);
void HandleInstrumentationEvent(unsigned event);
Counter* GetCounter(const char* name);
void InstrumentLoadStore(Instruction* instr);
void InstrumentLoadStorePair(Instruction* instr);
std::list<Counter*> counters_;
FILE *output_stream_;
uint64_t sample_period_;
};
} } // namespace v8::internal
#endif // V8_A64_INSTRUMENT_A64_H_
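A hedged usage sketch, not part of this diff: the default argument above makes counters Gauges, which the comment describes as resetting after each read/dump, while Cumulative counters keep their running total. Which named counter gets which type is decided in instrument-a64.cc and is not visible in this hunk; the names below are taken from the Visit* implementations above purely for illustration.
Counter total("Instruction", Cumulative);  // keeps its value across dumps
Counter loads("Load Integer");             // defaults to Gauge: resets per dump
total.Increment();
loads.Increment();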

2449
deps/v8/src/a64/lithium-a64.cc

File diff suppressed because it is too large

2967
deps/v8/src/a64/lithium-a64.h

File diff suppressed because it is too large

5692
deps/v8/src/a64/lithium-codegen-a64.cc

File diff suppressed because it is too large

473
deps/v8/src/a64/lithium-codegen-a64.h

@@ -1,473 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_CODEGEN_A64_H_
#define V8_A64_LITHIUM_CODEGEN_A64_H_
#include "a64/lithium-a64.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "v8utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;
class LCodeGen: public LCodeGenBase {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
translations_(info->zone()),
deferred_(8, info->zone()),
osr_pc_offset_(-1),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
Scope* scope() const { return scope_; }
int LookupDestination(int block_id) const {
return chunk()->LookupDestination(block_id);
}
bool IsNextEmittedBlock(int block_id) const {
return LookupDestination(block_id) == GetNextEmittedBlock();
}
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub() ||
info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
LinkRegisterStatus GetLinkRegisterState() const {
return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
}
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
void FinishCode(Handle<Code> code);
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32I(LOperand* op);
Operand ToOperand32U(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// TODO(jbramley): Examine these helpers and check that they make sense.
// IsInteger32Constant returns true for smi constants, for example.
bool IsInteger32Constant(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
DoubleRegister ToDoubleRegister(LOperand* op) const;
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
// Return a double scratch register which can be used locally
// when generating code for a lithium instruction.
DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
Label* exit,
Label* allocation_entry);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
void DoDeferredNumberTagU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredTaggedToI(LTaggedToI* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void DoGap(LGap* instr);
// Generic version of EmitBranch. It contains some code to avoid emitting a
// branch on the next emitted basic block where we could just fall-through.
// You shouldn't use that directly but rather consider one of the helper like
// LCodeGen::EmitBranch, LCodeGen::EmitCompareAndBranch...
template<class InstrType>
void EmitBranchGeneric(InstrType instr,
const BranchGenerator& branch);
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
template<class InstrType>
void EmitCompareAndBranch(InstrType instr,
Condition condition,
const Register& lhs,
const Operand& rhs);
template<class InstrType>
void EmitTestAndBranch(InstrType instr,
Condition condition,
const Register& value,
uint64_t mask);
template<class InstrType>
void EmitBranchIfNonZeroNumber(InstrType instr,
const FPRegister& value,
const FPRegister& scratch);
template<class InstrType>
void EmitBranchIfHeapNumber(InstrType instr,
const Register& value);
template<class InstrType>
void EmitBranchIfRoot(InstrType instr,
const Register& value,
Heap::RootListIndex index);
// Emits optimized code to deep-copy the contents of statically known object
// graphs (e.g. object literal boilerplate). Expects a pointer to the
// allocated destination object in the result register, and a pointer to the
// source object in the source register.
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
Register scratch,
int* offset,
AllocationSiteMode mode);
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
SmiCheck check_needed);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
MemOperand BuildSeqStringOperand(Register string,
Register temp,
LOperand* index,
String::Encoding encoding);
Deoptimizer::BailoutType DeoptimizeHeader(
LEnvironment* environment,
Deoptimizer::BailoutType* override_bailout_type);
void Deoptimize(LEnvironment* environment);
void Deoptimize(LEnvironment* environment,
Deoptimizer::BailoutType bailout_type);
void DeoptimizeIf(Condition cc, LEnvironment* environment);
void DeoptimizeIfZero(Register rt, LEnvironment* environment);
void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
void DeoptimizeIfRoot(Register rt,
Heap::RootListIndex index,
LEnvironment* environment);
void DeoptimizeIfNotRoot(Register rt,
Heap::RootListIndex index,
LEnvironment* environment);
void ApplyCheckIf(Condition cc, LBoundsCheck* check);
MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base,
Register scratch,
bool key_is_smi,
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
int additional_index);
void CalcKeyedArrayBaseRegister(Register base,
Register elements,
Register key,
bool key_is_tagged,
ElementsKind elements_kind);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
void Abort(BailoutReason reason);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
void AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer);
void SaveCallerDoubles();
void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue.
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset.
void GenerateOsrPrologue();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void LoadContextFromDeferred(LOperand* context);
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context);
// Generate a direct call to a known function.
// If the function is already loaded into x1 by the caller, function_reg may
// be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
// automatically load it.
void CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count,
int arity,
LInstruction* instr,
Register function_reg = NoReg);
// Support for recording safepoint and position information.
void RecordAndWritePosition(int position) V8_OVERRIDE;
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
void RecordSafepoint(Safepoint::DeoptMode mode);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode mode);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table itself is
// emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
case Safepoint::kWithRegisters:
codegen_->masm_->PushSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PushSafepointRegisters();
codegen_->masm_->PushSafepointFPRegisters();
break;
default:
UNREACHABLE();
}
}
~PushSafepointRegistersScope() {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
case Safepoint::kWithRegisters:
codegen_->masm_->PopSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PopSafepointFPRegisters();
codegen_->masm_->PopSafepointRegisters();
break;
default:
UNREACHABLE();
}
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
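  // Hedged usage note, not from this diff: the scope guard above is meant to
  // bracket code that calls out with registers spilled to the safepoint area,
  // for example:
  //   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //   ... emit the call and record the safepoint with registers ...
  // The registers are popped again when the scope object is destroyed.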
friend class LDeferredCode;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition is verified.
//
// For actual examples of condition see the concrete implementation in
// lithium-codegen-a64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
public:
explicit BranchGenerator(LCodeGen* codegen)
: codegen_(codegen) { }
virtual ~BranchGenerator() { }
virtual void Emit(Label* label) const = 0;
virtual void EmitInverted(Label* label) const = 0;
protected:
MacroAssembler* masm() const { return codegen_->masm(); }
LCodeGen* codegen_;
};
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_CODEGEN_A64_H_
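The BranchGenerator comment above points at concrete implementations in lithium-codegen-a64.cc. Below is a hedged sketch of the smallest such generator; it is not taken from this diff, and the exact B(Condition, Label*) and InvertCondition() signatures are assumptions based on how the rest of the port is written (__ is the usual ACCESS_MASM shorthand).
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen), cond_(cond) { }
  virtual void Emit(Label* label) const {
    __ B(cond_, label);                     // Branch when cond_ holds.
  }
  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(InvertCondition(cond_), label);  // Skip over a fall-through block.
    }
  }
 private:
  Condition cond_;
};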

326
deps/v8/src/a64/lithium-gap-resolver-a64.cc

@@ -1,326 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "a64/lithium-codegen-a64.h"
namespace v8 {
namespace internal {
// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list and using
// the root register has two advantages:
// - It is not in crankshaft allocatable registers list, so it can't interfere
// with any of the moves we are resolving.
// - We don't need to push it on the stack, as we can reload it with its value
// once we have resolved a cycle.
#define kSavedValue root
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
saved_destination_(NULL), need_to_restore_root_(false) { }
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found when we reach this move again.
PerformMove(i);
if (in_cycle_) RestoreValue();
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
if (!move.IsEliminated()) {
ASSERT(move.source()->IsConstantOperand());
EmitMove(i);
}
}
if (need_to_restore_root_) {
ASSERT(kSavedValue.Is(root));
__ InitializeRootRegister();
need_to_restore_root_ = false;
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
LMoveOperands& current_move = moves_[index];
ASSERT(!current_move.IsPending());
ASSERT(!current_move.IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(current_move.source() != NULL); // Otherwise it will look eliminated.
LOperand* destination = current_move.destination();
current_move.set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
current_move.set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
void LGapResolver::BreakCycle(int index) {
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
// We use a register which is not allocatable by crankshaft to break the cycle
// to be sure it doesn't interfere with the moves we are resolving.
ASSERT(!kSavedValue.IsAllocatable());
need_to_restore_root_ = true;
// We save in a register the source of that move and we remember its
// destination. Then we mark this move as resolved so the cycle is
// broken and we can perform the other moves.
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ Mov(kSavedValue, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ Ldr(kSavedValue, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
// TODO(all): We should use a double register to store the value to avoid
// the penalty of the mov across register banks. We are going to reserve
// d31 to hold 0.0 value. We could clobber this register while breaking the
// cycle and restore it after like we do with the root register.
// LGapResolver::RestoreValue() will need to be updated as well when we'll
// do that.
__ Fmov(kSavedValue, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ Ldr(kSavedValue, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// Mark this move as resolved.
// This move will be actually performed by moving the saved value to this
// move's destination in LGapResolver::RestoreValue().
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
if (saved_destination_->IsRegister()) {
__ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
} else if (saved_destination_->IsStackSlot()) {
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedValue);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ Mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
__ Str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ Ldr(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
EmitStackSlotMove(index);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsSmi(constant_source)) {
__ Mov(dst, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(dst, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else if (destination->IsDoubleRegister()) {
DoubleRegister result = cgen_->ToDoubleRegister(destination);
__ Fmov(result, cgen_->ToDouble(constant_source));
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
need_to_restore_root_ = true;
if (cgen_->IsSmi(constant_source)) {
__ Mov(kSavedValue, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32Constant(constant_source)) {
__ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
} else {
__ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
}
__ Str(kSavedValue, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleRegister()) {
DoubleRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ Fmov(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ Str(src, cgen_->ToMemOperand(destination));
}
} else if (source->IsDoubleStackSlot()) {
MemOperand src = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ Ldr(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
EmitStackSlotMove(index);
}
} else {
UNREACHABLE();
}
// The move has been emitted, we can eliminate it.
moves_[index].Eliminate();
}
void LGapResolver::EmitStackSlotMove(int index) {
// We need a temp register to perform a stack slot to stack slot move, and
// the register must not be involved in breaking cycles.
// Use the Crankshaft double scratch register as the temporary.
DoubleRegister temp = crankshaft_fp_scratch;
LOperand* src = moves_[index].source();
LOperand* dst = moves_[index].destination();
ASSERT(src->IsStackSlot());
ASSERT(dst->IsStackSlot());
__ Ldr(temp, cgen_->ToMemOperand(src));
__ Str(temp, cgen_->ToMemOperand(dst));
}
} } // namespace v8::internal
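A hedged worked example of the cycle-breaking path above, not taken from this diff: the parallel move {x0 -> x1, x1 -> x0} has no cycle-free ordering, so PerformMove() reaches the root move again, BreakCycle() spills the blocking source into kSavedValue (the root register), RestoreValue() completes the swap, and Resolve() reinstates the root register because need_to_restore_root_ was set. The emitted sequence is roughly:
  __ Mov(kSavedValue, x1);      // BreakCycle(): save the blocking source.
  __ Mov(x1, x0);               // EmitMove(): the now-unblocked move.
  __ Mov(x0, kSavedValue);      // RestoreValue(): saved value to its destination.
  __ InitializeRootRegister();  // Resolve(): restore the root register.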

90
deps/v8/src/a64/lithium-gap-resolver-a64.h

@@ -1,90 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#define V8_A64_LITHIUM_GAP_RESOLVER_A64_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Emit a move from one stack slot to another.
void EmitStackSlotMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
// We use the root register as a scratch in a few places. When that happens,
// this flag is set to indicate that it needs to be restored.
bool need_to_restore_root_;
};
} } // namespace v8::internal
#endif // V8_A64_LITHIUM_GAP_RESOLVER_A64_H_

1647
deps/v8/src/a64/macro-assembler-a64-inl.h

File diff suppressed because it is too large

4975
deps/v8/src/a64/macro-assembler-a64.cc

File diff suppressed because it is too large

2238
deps/v8/src/a64/macro-assembler-a64.h

File diff suppressed because it is too large

1730
deps/v8/src/a64/regexp-macro-assembler-a64.cc

File diff suppressed because it is too large

315
deps/v8/src/a64/regexp-macro-assembler-a64.h

@@ -1,315 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#define V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
#include "a64/assembler-a64.h"
#include "a64/assembler-a64-inl.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
class RegExpMacroAssemblerA64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerA64(Mode mode, int registers_to_save, Zone* zone);
virtual ~RegExpMacroAssemblerA64();
virtual int stack_limit_slack();
virtual void AdvanceCurrentPosition(int by);
virtual void AdvanceRegister(int reg, int by);
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(unsigned c, Label* on_equal);
virtual void CheckCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
virtual void CheckNotAtStart(Label* on_not_at_start);
virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,
uc16 mask,
Label* on_not_equal);
virtual void CheckCharacterInRange(uc16 from,
uc16 to,
Label* on_in_range);
virtual void CheckCharacterNotInRange(uc16 from,
uc16 to,
Label* on_not_in_range);
virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
virtual void IfRegisterEqPos(int reg, Label* if_eq);
virtual IrregexpImplementation Implementation();
virtual void LoadCurrentCharacter(int cp_offset,
Label* on_end_of_input,
bool check_bounds = true,
int characters = 1);
virtual void PopCurrentPosition();
virtual void PopRegister(int register_index);
virtual void PushBacktrack(Label* label);
virtual void PushCurrentPosition();
virtual void PushRegister(int register_index,
StackCheckFlag check_stack_limit);
virtual void ReadCurrentPositionFromRegister(int reg);
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
virtual bool CanReadUnaligned();
// Called from RegExp if the stack-guard is triggered.
// If the code object is relocated, the return address is fixed before
// returning.
static int CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame,
int start_offset,
const byte** input_start,
const byte** input_end);
private:
// Above the frame pointer - Stored registers and stack passed parameters.
// Callee-saved registers x19-x29, where x29 is the old frame pointer.
static const int kCalleeSavedRegisters = 0;
// Return address.
// It is placed above the 11 callee-saved registers.
static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameter placed by caller.
static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
static const int kDirectCall = kCalleeSavedRegisters - kPointerSize;
static const int kStackBase = kDirectCall - kPointerSize;
static const int kOutputSize = kStackBase - kPointerSize;
static const int kInput = kOutputSize - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessCounter = kInput - kPointerSize;
// First position register address on the stack. Following positions are
// below it. A position is a 32 bit value.
static const int kFirstRegisterOnStack = kSuccessCounter - kWRegSizeInBytes;
// A capture is a 64 bit value holding two positions.
static const int kFirstCaptureOnStack = kSuccessCounter - kXRegSizeInBytes;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
// When initializing registers to a non-position value we can unroll
// the loop. Set the limit of registers to unroll.
static const int kNumRegistersToUnroll = 16;
// We are using x0 to x7 as a register cache. Each hardware register must
// contain one capture, that is two 32 bit registers. We can cache at most
// 16 registers.
static const int kNumCachedRegisters = 16;
// Load a number of characters at the given offset from the
// current position, into the current-character register.
void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
// Check whether preemption has been requested.
void CheckPreemption();
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
// Location of a 32 bit position register.
MemOperand register_location(int register_index);
// Location of a 64 bit capture, combining two position registers.
MemOperand capture_location(int register_index, Register scratch);
// Register holding the current input position as negative offset from
// the end of the string.
Register current_input_offset() { return w21; }
// The register containing the current character after LoadCurrentCharacter.
Register current_character() { return w22; }
// Register holding address of the end of the input string.
Register input_end() { return x25; }
// Register holding address of the start of the input string.
Register input_start() { return x26; }
// Register holding the offset from the start of the string where we should
// start matching.
Register start_offset() { return w27; }
// Pointer to the output array's first element.
Register output_array() { return x28; }
// Register holding the frame address. Local variables, parameters and
// regexp registers are addressed relative to this.
Register frame_pointer() { return fp; }
// The register containing the backtrack stack top. Provides a meaningful
// name to the register.
Register backtrack_stackpointer() { return x23; }
// Register holding pointer to the current code object.
Register code_pointer() { return x20; }
// Register holding the value used for clearing capture registers.
Register non_position_value() { return w24; }
// The top 32 bits of this register are used to store this value
// twice. This is used for clearing more than one register at a time.
Register twice_non_position_value() { return x24; }
// Byte size of chars in the string to match (decided by the Mode argument)
int char_size() { return static_cast<int>(mode_); }
// Equivalent to a conditional branch to the label, unless the label
// is NULL, in which case it is a conditional Backtrack.
void BranchOrBacktrack(Condition condition, Label* to);
// Compares reg against immediate before calling BranchOrBacktrack.
// It makes use of the Cbz and Cbnz instructions.
void CompareAndBranchOrBacktrack(Register reg,
int immediate,
Condition condition,
Label* to);
inline void CallIf(Label* to, Condition condition);
// Save and restore the link register on the stack in a way that
// is GC-safe.
inline void SaveLinkRegister();
inline void RestoreLinkRegister();
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer by a word size and stores the register's value there.
inline void Push(Register source);
// Pops a value from the backtrack stack. Reads the word at the stack pointer
// and increments it by a word size.
inline void Pop(Register target);
// This state indicates where the register actually is.
enum RegisterState {
STACKED, // Resides in memory.
CACHED_LSW, // Least Significant Word of a 64 bit hardware register.
CACHED_MSW // Most Significant Word of a 64 bit hardware register.
};
RegisterState GetRegisterState(int register_index) {
ASSERT(register_index >= 0);
if (register_index >= kNumCachedRegisters) {
return STACKED;
} else {
if ((register_index % 2) == 0) {
return CACHED_LSW;
} else {
return CACHED_MSW;
}
}
}
// Store helper that takes the state of the register into account.
inline void StoreRegister(int register_index, Register source);
// Returns a hardware W register that holds the value of the capture
// register.
//
// This function will try to use an existing cache register (w0-w7) for the
// result. Otherwise, it will load the value into maybe_result.
//
// If the returned register is anything other than maybe_result, calling code
// must not write to it.
inline Register GetRegister(int register_index, Register maybe_result);
// Returns the hardware register (x0-x7) holding the value of the capture
// register.
// This assumes that the state of the register is not STACKED.
inline Register GetCachedRegister(int register_index);
Isolate* isolate() const { return masm_->isolate(); }
MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16).
Mode mode_;
// One greater than maximal register index actually used.
int num_registers_;
// Number of registers to output at the end (the saved registers
// are always 0..num_saved_registers_-1)
int num_saved_registers_;
// Labels used internally.
Label entry_label_;
Label start_label_;
Label success_label_;
Label backtrack_label_;
Label exit_label_;
Label check_preempt_label_;
Label stack_overflow_label_;
};
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_A64_REGEXP_MACRO_ASSEMBLER_A64_H_
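A hedged illustration of the register cache layout implied by kNumCachedRegisters and GetRegisterState() above: capture registers 0-15 are cached in x0-x7, two 32-bit positions per X register, and higher indices spill to the stack slots addressed through register_location(). The pairing of index i with x(i/2) is an assumption consistent with the even/odd LSW/MSW split, not code shown in this hunk.
  // register_index 0   -> low  word of x0 (CACHED_LSW)
  // register_index 1   -> high word of x0 (CACHED_MSW)
  // register_index 2   -> low  word of x1 (CACHED_LSW)
  // ...
  // register_index 15  -> high word of x7 (CACHED_MSW)
  // register_index 16+ -> STACKED (memory, via register_location())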

3414
deps/v8/src/a64/simulator-a64.cc

File diff suppressed because it is too large

868
deps/v8/src/a64/simulator-a64.h

@@ -1,868 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_SIMULATOR_A64_H_
#define V8_A64_SIMULATOR_A64_H_
#include <stdarg.h>
#include <vector>
#include "v8.h"
#include "globals.h"
#include "utils.h"
#include "allocation.h"
#include "assembler.h"
#include "a64/assembler-a64.h"
#include "a64/decoder-a64.h"
#include "a64/disasm-a64.h"
#include "a64/instrument-a64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native A64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
typedef int (*a64_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type a64_regexp_matcher.
// The ninth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<a64_regexp_matcher>(entry)( \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static void UnregisterCTryCatch() { }
};
#else // !defined(USE_SIMULATOR)
enum ReverseByteMode {
Reverse16 = 0,
Reverse32 = 1,
Reverse64 = 2
};
// The proper way to initialize a simulated system register (such as NZCV) is as
// follows:
// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
class SimSystemRegister {
public:
// The default constructor represents a register which has no writable bits.
// It is not possible to set its value to anything other than 0.
SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
uint32_t RawValue() const {
return value_;
}
void SetRawValue(uint32_t new_value) {
value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
}
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, value_);
}
int32_t SignedBits(int msb, int lsb) const {
return signed_bitextract_32(msb, lsb, value_);
}
void SetBits(int msb, int lsb, uint32_t bits);
// Default system register values.
static SimSystemRegister DefaultValueFor(SystemRegister id);
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
uint32_t Name() const { return Func(HighBit, LowBit); } \
void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
#undef DEFINE_WRITE_IGNORE_MASK
#undef DEFINE_GETTER
protected:
// Most system registers only implement a few of the bits in the word. Other
// bits are "read-as-zero, write-ignored". The write_ignore_mask argument
// describes the bits which are not modifiable.
SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
: value_(value), write_ignore_mask_(write_ignore_mask) { }
uint32_t value_;
uint32_t write_ignore_mask_;
};
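// For example, the NZCV flags can be read and written through the generated
// accessors (a minimal sketch; the N, Z, C and V fields come from
// SYSTEM_REGISTER_FIELDS_LIST above):
//   SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
//   nzcv.SetN(1);                    // Set the Negative flag.
//   uint32_t raw = nzcv.RawValue();  // Read the whole register back.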
// Represent a register (r0-r31, v0-v31).
template<int kSizeInBytes>
class SimRegisterBase {
public:
template<typename T>
void Set(T new_value, unsigned size = sizeof(T)) {
ASSERT(size <= kSizeInBytes);
ASSERT(size <= sizeof(new_value));
// All AArch64 register writes are zero-extending: writing a W register clears
// the top bits of the corresponding X register.
memset(value_, 0, kSizeInBytes);
memcpy(value_, &new_value, size);
}
// Copy 'size' bytes of the register to the result, and zero-extend to fill
// the result.
template<typename T>
T Get(unsigned size = sizeof(T)) const {
ASSERT(size <= kSizeInBytes);
T result;
memset(&result, 0, sizeof(result));
memcpy(&result, value_, size);
return result;
}
protected:
uint8_t value_[kSizeInBytes];
};
typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
typedef SimRegisterBase<kDRegSizeInBytes> SimFPRegister; // v0-v31
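// Writes through Set() zero-extend, mirroring AArch64 W-register writes.
// A minimal sketch:
//   SimRegister x0;
//   x0.Set<uint32_t>(0xffffffff);     // W-sized write.
//   uint64_t x = x0.Get<uint64_t>();  // Reads back 0x00000000ffffffff.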
class Simulator : public DecoderVisitor {
public:
explicit Simulator(Decoder* decoder,
Isolate* isolate = NULL,
FILE* stream = stderr);
~Simulator();
// System functions.
static void Initialize(Isolate* isolate);
static Simulator* current(v8::internal::Isolate* isolate);
class CallArgument;
// Call an arbitrary function taking an arbitrary number of arguments. The
// varargs list must be a set of arguments with type CallArgument, and
// terminated by CallArgument::End().
void CallVoid(byte* entry, CallArgument* args);
// Like CallVoid, but expect a return value.
int64_t CallInt64(byte* entry, CallArgument* args);
double CallDouble(byte* entry, CallArgument* args);
// V8 calls into generated JS code with 5 parameters and into
// generated RegExp code with 10 parameters. These are convenience functions,
// which set up the simulator state and grab the result on return.
int64_t CallJS(byte* entry,
byte* function_entry,
JSFunction* func,
Object* recv,
int64_t argc,
Object*** argv);
int64_t CallRegExp(byte* entry,
String* input,
int64_t start_offset,
const byte* input_start,
const byte* input_end,
int* output,
int64_t output_size,
Address stack_base,
int64_t direct_call,
void* return_address,
Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
// functions.
//
// Only arguments up to 64 bits in size are supported.
class CallArgument {
public:
template<typename T>
explicit CallArgument(T argument) {
ASSERT(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = X_ARG;
}
explicit CallArgument(double argument) {
ASSERT(sizeof(argument) == sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
explicit CallArgument(float argument) {
// TODO(all): CallArgument(float) is untested, remove this check once
// tested.
UNIMPLEMENTED();
// Make the D register a NaN to try to trap errors if the callee expects a
// double. If it expects a float, the callee should ignore the top word.
ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
// Write the float payload to the S register.
ASSERT(sizeof(argument) <= sizeof(bits_));
memcpy(&bits_, &argument, sizeof(argument));
type_ = D_ARG;
}
// This indicates the end of the arguments list, so that CallArgument
// objects can be passed into varargs functions.
static CallArgument End() { return CallArgument(); }
int64_t bits() const { return bits_; }
bool IsEnd() const { return type_ == NO_ARG; }
bool IsX() const { return type_ == X_ARG; }
bool IsD() const { return type_ == D_ARG; }
private:
enum CallArgumentType { X_ARG, D_ARG, NO_ARG };
// All arguments are aligned to at least 64 bits and we don't support
// passing bigger arguments, so the payload size can be fixed at 64 bits.
int64_t bits_;
CallArgumentType type_;
CallArgument() { type_ = NO_ARG; }
};
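// A minimal usage sketch (assuming 'entry' points at generated code and
// 'simulator' is the current Simulator):
//   CallArgument args[] = { CallArgument(42),     // Passed in an X register.
//                           CallArgument(1.0),    // Passed in a D register.
//                           CallArgument::End() };
//   int64_t result = simulator->CallInt64(entry, args);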
// Start the debugging command line.
void Debug();
bool GetValue(const char* desc, int64_t* value);
bool PrintValue(const char* desc);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
// Accessor to the internal simulator stack area.
uintptr_t StackLimit() const;
void ResetState();
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
// Run the simulator.
static const Instruction* kEndOfSimAddress;
void DecodeInstruction();
void Run();
void RunFrom(Instruction* start);
// Simulation helpers.
template <typename T>
void set_pc(T new_pc) {
ASSERT(sizeof(T) == sizeof(pc_));
memcpy(&pc_, &new_pc, sizeof(T));
pc_modified_ = true;
}
Instruction* pc() { return pc_; }
void increment_pc() {
if (!pc_modified_) {
pc_ = pc_->NextInstruction();
}
pc_modified_ = false;
}
void ExecuteInstruction() {
ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext();
decoder_->Decode(pc_);
LogProcessorState();
increment_pc();
CheckBreakpoints();
}
// Declare all Visitor functions.
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
// Register accessors.
// Return 'size' bits of the value of an integer register, as the specified
// type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kXRegSize and kWRegSize.
template<typename T>
T reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
T result;
memset(&result, 0, sizeof(result));
return result;
}
return registers_[code].Get<T>(size_in_bytes);
}
// Like reg(), but infer the access size from the template type.
template<typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<T>(sizeof(T) * 8, code, r31mode);
}
// Common specialized accessors for the reg() template.
int32_t wreg(unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int32_t>(code, r31mode);
}
int64_t xreg(unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(code, r31mode);
}
int64_t reg(unsigned size, unsigned code,
Reg31Mode r31mode = Reg31IsZeroRegister) const {
return reg<int64_t>(size, code, r31mode);
}
// Write 'size' bits of 'value' into an integer register. The value is
// zero-extended. This behaviour matches AArch64 register writes.
//
// The only supported values of 'size' are kXRegSize and kWRegSize.
template<typename T>
void set_reg(unsigned size, unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kXRegSize) || (size == kWRegSize));
ASSERT(code < kNumberOfRegisters);
if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
return;
}
return registers_[code].Set(value, size_in_bytes);
}
// Like set_reg(), but infer the access size from the template type.
template<typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(sizeof(value) * 8, code, value, r31mode);
}
// Common specialized accessors for the set_reg() template.
void set_wreg(unsigned code, int32_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kWRegSize, code, value, r31mode);
}
void set_xreg(unsigned code, int64_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg(kXRegSize, code, value, r31mode);
}
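// The zero-register behaviour described above, as a short sketch:
//   set_xreg(0, 0x0123456789abcdefL);
//   wreg(0);                        // 0x89abcdef: the W view of x0.
//   xreg(31);                       // 0: code 31 reads as zero by default.
//   xreg(31, Reg31IsStackPointer);  // Reads the current stack pointer.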
// Commonly-used special cases.
template<typename T>
void set_lr(T value) {
ASSERT(sizeof(T) == kPointerSize);
set_reg(kLinkRegCode, value);
}
template<typename T>
void set_sp(T value) {
ASSERT(sizeof(T) == kPointerSize);
set_reg(31, value, Reg31IsStackPointer);
}
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
int64_t fp() {
return xreg(kFramePointerRegCode, Reg31IsStackPointer);
}
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
Address get_sp() { return reg<Address>(31, Reg31IsStackPointer); }
// Return 'size' bits of the value of a floating-point register, as the
// specified type. The value is zero-extended to fill the result.
//
// The only supported values of 'size' are kDRegSize and kSRegSize.
template<typename T>
T fpreg(unsigned size, unsigned code) const {
unsigned size_in_bytes = size / 8;
ASSERT(size_in_bytes <= sizeof(T));
ASSERT((size == kDRegSize) || (size == kSRegSize));
ASSERT(code < kNumberOfFPRegisters);
return fpregisters_[code].Get<T>(size_in_bytes);
}
// Like fpreg(), but infer the access size from the template type.
template<typename T>
T fpreg(unsigned code) const {
return fpreg<T>(sizeof(T) * 8, code);
}
// Common specialized accessors for the fpreg() template.
float sreg(unsigned code) const {
return fpreg<float>(code);
}
uint32_t sreg_bits(unsigned code) const {
return fpreg<uint32_t>(code);
}
double dreg(unsigned code) const {
return fpreg<double>(code);
}
uint64_t dreg_bits(unsigned code) const {
return fpreg<uint64_t>(code);
}
double fpreg(unsigned size, unsigned code) const {
switch (size) {
case kSRegSize: return sreg(code);
case kDRegSize: return dreg(code);
default:
UNREACHABLE();
return 0.0;
}
}
// Write 'value' into a floating-point register. The value is zero-extended.
// This behaviour matches AArch64 register writes.
template<typename T>
void set_fpreg(unsigned code, T value) {
ASSERT((sizeof(value) == kDRegSizeInBytes) ||
(sizeof(value) == kSRegSizeInBytes));
ASSERT(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value, sizeof(value));
}
// Common specialized accessors for the set_fpreg() template.
void set_sreg(unsigned code, float value) {
set_fpreg(code, value);
}
void set_sreg_bits(unsigned code, uint32_t value) {
set_fpreg(code, value);
}
void set_dreg(unsigned code, double value) {
set_fpreg(code, value);
}
void set_dreg_bits(unsigned code, uint64_t value) {
set_fpreg(code, value);
}
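// For example, the raw-bits accessors expose the IEEE-754 encoding directly:
//   set_dreg(0, 1.0);
//   dreg_bits(0);       // 0x3ff0000000000000
//   set_sreg(1, -2.0f);
//   sreg_bits(1);       // 0xc0000000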
bool N() { return nzcv_.N() != 0; }
bool Z() { return nzcv_.Z() != 0; }
bool C() { return nzcv_.C() != 0; }
bool V() { return nzcv_.V() != 0; }
SimSystemRegister& nzcv() { return nzcv_; }
// TODO(jbramley): Find a way to make the fpcr_ members return the proper
// types, so this accessor is not necessary.
FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
SimSystemRegister& fpcr() { return fpcr_; }
// Debug helpers
// Simulator breakpoints.
struct Breakpoint {
Instruction* location;
bool enabled;
};
std::vector<Breakpoint> breakpoints_;
void SetBreakpoint(Instruction* breakpoint);
void ListBreakpoints();
void CheckBreakpoints();
// Helpers for the 'next' command.
// When this is set, the Simulator will insert a breakpoint after the next BL
// instruction it executes.
bool break_on_next_;
// Check if the Simulator should insert a break after the current instruction
// for the 'next' command.
void CheckBreakNext();
// Disassemble instruction at the given address.
void PrintInstructionsAt(Instruction* pc, uint64_t count);
void PrintSystemRegisters(bool print_all = false);
void PrintRegisters(bool print_all_regs = false);
void PrintFPRegisters(bool print_all_regs = false);
void PrintProcessorState();
void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
void LogSystemRegisters() {
if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
}
void LogRegisters() {
if (log_parameters_ & LOG_REGS) PrintRegisters();
}
void LogFPRegisters() {
if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
}
void LogProcessorState() {
LogSystemRegisters();
LogRegisters();
LogFPRegisters();
}
void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
}
int log_parameters() { return log_parameters_; }
void set_log_parameters(int new_parameters) {
if (new_parameters & LOG_DISASM) {
decoder_->InsertVisitorBefore(print_disasm_, this);
} else {
decoder_->RemoveVisitor(print_disasm_);
}
log_parameters_ = new_parameters;
}
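// For example, to trace disassembly and integer registers while running
// (assuming 'sim' is a Simulator*):
//   sim->set_log_parameters(sim->log_parameters() | LOG_DISASM | LOG_REGS);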
static inline const char* WRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* XRegNameForCode(unsigned code,
Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* SRegNameForCode(unsigned code);
static inline const char* DRegNameForCode(unsigned code);
static inline const char* VRegNameForCode(unsigned code);
static inline int CodeFromName(const char* name);
protected:
// Simulation helpers ------------------------------------
bool ConditionPassed(Condition cond) {
switch (cond) {
case eq:
return Z();
case ne:
return !Z();
case hs:
return C();
case lo:
return !C();
case mi:
return N();
case pl:
return !N();
case vs:
return V();
case vc:
return !V();
case hi:
return C() && !Z();
case ls:
return !(C() && !Z());
case ge:
return N() == V();
case lt:
return N() != V();
case gt:
return !Z() && (N() == V());
case le:
return !(!Z() && (N() == V()));
case nv: // Fall through.
case al:
return true;
default:
UNREACHABLE();
return false;
}
}
bool ConditionFailed(Condition cond) {
return !ConditionPassed(cond);
}
void AddSubHelper(Instruction* instr, int64_t op2);
int64_t AddWithCarry(unsigned reg_size,
bool set_flags,
int64_t src1,
int64_t src2,
int64_t carry_in = 0);
void LogicalHelper(Instruction* instr, int64_t op2);
void ConditionalCompareHelper(Instruction* instr, int64_t op2);
void LoadStoreHelper(Instruction* instr,
int64_t offset,
AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
uint8_t* LoadStoreAddress(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode);
void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
uint8_t MemoryRead8(uint8_t* address);
uint16_t MemoryRead16(uint8_t* address);
uint32_t MemoryRead32(uint8_t* address);
float MemoryReadFP32(uint8_t* address);
uint64_t MemoryRead64(uint8_t* address);
double MemoryReadFP64(uint8_t* address);
void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
void MemoryWrite32(uint8_t* address, uint32_t value);
void MemoryWriteFP32(uint8_t* address, float value);
void MemoryWrite64(uint8_t* address, uint64_t value);
void MemoryWriteFP64(uint8_t* address, double value);
int64_t ShiftOperand(unsigned reg_size,
int64_t value,
Shift shift_type,
unsigned amount);
int64_t Rotate(unsigned reg_width,
int64_t value,
Shift shift_type,
unsigned amount);
int64_t ExtendValue(unsigned reg_width,
int64_t value,
Extend extend_type,
unsigned left_shift = 0);
uint64_t ReverseBits(uint64_t value, unsigned num_bits);
uint64_t ReverseBytes(uint64_t value, ReverseByteMode mode);
void FPCompare(double val0, double val1);
double FPRoundInt(double value, FPRounding round_mode);
double FPToDouble(float value);
float FPToFloat(double value, FPRounding round_mode);
double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
int32_t FPToInt32(double value, FPRounding rmode);
int64_t FPToInt64(double value, FPRounding rmode);
uint32_t FPToUInt32(double value, FPRounding rmode);
uint64_t FPToUInt64(double value, FPRounding rmode);
template <typename T>
T FPMax(T a, T b);
template <typename T>
T FPMin(T a, T b);
template <typename T>
T FPMaxNM(T a, T b);
template <typename T>
T FPMinNM(T a, T b);
void CheckStackAlignment();
inline void CheckPCSComplianceAndRun();
#ifdef DEBUG
// Corruption values should have their least significant byte cleared so that
// the code of the register being corrupted can be inserted into it.
static const uint64_t kCallerSavedRegisterCorruptionValue =
0xca11edc0de000000UL;
// This value is a NaN in both 32-bit and 64-bit FP.
static const uint64_t kCallerSavedFPRegisterCorruptionValue =
0x7ff000007f801000UL;
// This value is a NaN in both 32-bit and 64-bit FP, with a "verbose" immediate
// as its payload.
static const uint64_t kDefaultCPURegisterCorruptionValue =
0x7ffbad007f8bad00UL;
void CorruptRegisters(CPURegList* list,
uint64_t value = kDefaultCPURegisterCorruptionValue);
void CorruptAllCallerSavedCPURegisters();
#endif
// Processor state ---------------------------------------
// Output stream.
FILE* stream_;
PrintDisassembler* print_disasm_;
// Instrumentation.
Instrument* instrument_;
// General purpose registers. Register 31 is the stack pointer.
SimRegister registers_[kNumberOfRegisters];
// Floating point registers
SimFPRegister fpregisters_[kNumberOfFPRegisters];
// Processor state
// bits[31, 28]: Condition flags N, Z, C, and V.
// (Negative, Zero, Carry, Overflow)
SimSystemRegister nzcv_;
// Floating-Point Control Register
SimSystemRegister fpcr_;
// Only a subset of FPCR features are supported by the simulator. This helper
// checks that the FPCR settings are supported.
//
// This is checked when floating-point instructions are executed, not when
// FPCR is set. This allows generated code to modify FPCR for external
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
ASSERT(fpcr().DN() == 0); // No default-NaN support.
ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
// The simulator does not support half-precision operations so fpcr().AHP()
// is irrelevant, and is not checked here.
}
static int CalcNFlag(uint64_t result, unsigned reg_size) {
return (result >> (reg_size - 1)) & 1;
}
static int CalcZFlag(uint64_t result) {
return result == 0;
}
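// For example:
//   CalcNFlag(0x80000000, kWRegSize);  // 1: the W-sized sign bit is set.
//   CalcZFlag(0);                      // 1: the result is zero.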
static const uint32_t kConditionFlagsMask = 0xf0000000;
// Stack
byte* stack_;
static const intptr_t stack_protection_size_ = KB;
intptr_t stack_size_;
byte* stack_limit_;
// TODO(aleram): protect the stack.
Decoder* decoder_;
Decoder* disassembler_decoder_;
// Indicates if the pc has been modified by the instruction and should not be
// automatically incremented.
bool pc_modified_;
Instruction* pc_;
static const char* xreg_names[];
static const char* wreg_names[];
static const char* sreg_names[];
static const char* dreg_names[];
static const char* vreg_names[];
// Debugger input.
void set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
}
char* last_debugger_input() { return last_debugger_input_; }
char* last_debugger_input_;
private:
int log_parameters_;
Isolate* isolate_;
};
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->CallJS( \
FUNCTION_ADDR(entry), \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->CallRegExp( \
entry, \
p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
// See also 'class SimulatorStack' in arm/simulator-arm.h.
class SimulatorStack : public v8::internal::AllStatic {
public:
static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
}
static uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static void UnregisterCTryCatch() {
Simulator::current(Isolate::Current())->PopAddress();
}
};
#endif // !defined(USE_SIMULATOR)
} } // namespace v8::internal
#endif // V8_A64_SIMULATOR_A64_H_

1548
deps/v8/src/a64/stub-cache-a64.cc

File diff suppressed because it is too large

112
deps/v8/src/a64/utils-a64.cc

@@ -1,112 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if V8_TARGET_ARCH_A64
#include "a64/utils-a64.h"
namespace v8 {
namespace internal {
#define __ assm->
int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
}
return count;
}
int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
ASSERT((width == 32) || (width == 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
return CountLeadingZeros(~value, width) - 1;
}
}
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for A64 hosts.
ASSERT((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
}
return count;
}
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
ASSERT((width == 32) || (width == 64));
// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits: h g f e d c b a
// \ | \ | \ | \ |
// value = h+g f+e d+c b+a
// \ | \ |
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return value;
}
int MaskToBit(uint64_t mask) {
ASSERT(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
}
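// For example:
//   CountLeadingZeros(0x1, 64);    // 63
//   CountTrailingZeros(0x80, 64);  // 7
//   CountSetBits(0xf0f0, 64);      // 8
//   MaskToBit(0x80);               // 7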
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_A64

109
deps/v8/src/a64/utils-a64.h

@@ -1,109 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_A64_UTILS_A64_H_
#define V8_A64_UTILS_A64_H_
#include <cmath>
#include "v8.h"
#include "a64/constants-a64.h"
#define REGISTER_CODE_LIST(R) \
R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
namespace v8 {
namespace internal {
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
static inline uint64_t double_to_rawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
static inline float rawbits_to_float(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
static inline double rawbits_to_double(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
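// For example, these helpers simply reinterpret the bit pattern:
//   float_to_rawbits(1.0f);                    // 0x3f800000
//   rawbits_to_double(0x4000000000000000UL);   // 2.0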
// Bits counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int MaskToBit(uint64_t mask);
// NaN tests.
inline bool IsSignallingNaN(double num) {
const uint64_t kFP64QuietNaNMask = 0x0008000000000000UL;
uint64_t raw = double_to_rawbits(num);
if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(float num) {
const uint64_t kFP32QuietNaNMask = 0x00400000UL;
uint32_t raw = float_to_rawbits(num);
if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
return true;
}
return false;
}
template <typename T>
inline bool IsQuietNaN(T num) {
return std::isnan(num) && !IsSignallingNaN(num);
}
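// For example, using the raw-bits helpers above:
//   IsSignallingNaN(rawbits_to_double(0x7ff0000000000001UL));  // true
//   IsQuietNaN(rawbits_to_double(0x7ff8000000000000UL));       // true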
} } // namespace v8::internal
#endif // V8_A64_UTILS_A64_H_

1
deps/v8/src/allocation-tracker.cc

@@ -267,7 +267,6 @@ AllocationTracker::UnresolvedLocation::~UnresolvedLocation() {
 
 void AllocationTracker::UnresolvedLocation::Resolve() {
   if (script_.is_null()) return;
-  HandleScope scope(script_->GetIsolate());
   info_->line = GetScriptLineNumber(script_, start_position_);
   info_->column = GetScriptColumnNumber(script_, start_position_);
 }

26
deps/v8/src/api.cc

@@ -6293,25 +6293,6 @@ void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
 }
 
 
-void V8::RunMicrotasks(Isolate* isolate) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::HandleScope scope(i_isolate);
-  i::V8::RunMicrotasks(i_isolate);
-}
-
-
-void V8::EnqueueMicrotask(Isolate* isolate, Handle<Function> microtask) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ENTER_V8(i_isolate);
-  i::Execution::EnqueueMicrotask(i_isolate, Utils::OpenHandle(*microtask));
-}
-
-
-void V8::SetAutorunMicrotasks(Isolate* isolate, bool autorun) {
-  reinterpret_cast<i::Isolate*>(isolate)->set_autorun_microtasks(autorun);
-}
-
-
 void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
   i::V8::RemoveCallCompletedCallback(callback);
 }
@@ -6971,13 +6952,6 @@ SnapshotObjectId HeapGraphNode::GetId() const {
 
 
 int HeapGraphNode::GetSelfSize() const {
-  size_t size = ToInternal(this)->self_size();
-  CHECK(size <= static_cast<size_t>(internal::kMaxInt));
-  return static_cast<int>(size);
-}
-
-
-size_t HeapGraphNode::GetShallowSize() const {
   return ToInternal(this)->self_size();
 }
 

1
deps/v8/src/arm/OWNERS

@@ -1 +0,0 @@
-rmcilroy@chromium.org

177
deps/v8/src/arm/code-stubs-arm.cc

@@ -105,8 +105,8 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r2, r3 };
-  descriptor->register_param_count_ = 2;
+  static Register registers[] = { r2 };
+  descriptor->register_param_count_ = 1;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ = NULL;
 }
@@ -602,7 +602,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   Label out_of_range, only_low, negate, done;
   Register input_reg = source();
   Register result_reg = destination();
-  ASSERT(is_truncating());
   int double_offset = offset();
 
   // Account for saved regs if input is sp.
@@ -3005,102 +3004,82 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) { static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states // Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and // are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic. // megamorphic.
// r0 : number of arguments to the construct function // r0 : number of arguments to the construct function
// r1 : the function to call // r1 : the function to call
// r2 : Feedback vector // r2 : cache cell for call target
// r3 : slot in feedback vector (Smi) Label initialize, done, miss, megamorphic, not_array_function;
Label check_array, initialize_array, initialize_non_array, megamorphic, done;
ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value()); masm->isolate()->heap()->undefined_value());
Heap::RootListIndex kMegamorphicRootIndex = Heap::kUndefinedValueRootIndex; ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->the_hole_value()); masm->isolate()->heap()->the_hole_value());
Heap::RootListIndex kUninitializedRootIndex = Heap::kTheHoleValueRootIndex;
ASSERT_EQ(*TypeFeedbackInfo::PremonomorphicSentinel(masm->isolate()),
masm->isolate()->heap()->null_value());
Heap::RootListIndex kPremonomorphicRootIndex = Heap::kNullValueRootIndex;
// Load the cache state into r4. // Load the cache state into r3.
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the // A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state. // function without changing the state.
__ cmp(r4, r1); __ cmp(r3, r1);
__ b(eq, &done);
__ CompareRoot(r4, kMegamorphicRootIndex);
__ b(eq, &done); __ b(eq, &done);
// Check if we're dealing with the Array function or not. // If we came here, we need to see if we are the array function.
__ LoadArrayFunction(r5); // If we didn't have a matching function, and we didn't find the megamorph
__ cmp(r1, r5); // sentinel, then we have in the cell either some other function or an
__ b(eq, &check_array); // AllocationSite. Do a map check on the object in ecx.
__ ldr(r5, FieldMemOperand(r3, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Non-array cache: Check the cache state. // Make sure the function is the Array() function
__ CompareRoot(r4, kPremonomorphicRootIndex); __ LoadArrayFunction(r3);
__ b(eq, &initialize_non_array); __ cmp(r1, r3);
__ CompareRoot(r4, kUninitializedRootIndex);
__ b(ne, &megamorphic); __ b(ne, &megamorphic);
// Non-array cache: Uninitialized -> premonomorphic. The sentinel is an
// immortal immovable object (null) so no write-barrier is needed.
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, kPremonomorphicRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done); __ jmp(&done);
// Array cache: Check the cache state to see if we're in a monomorphic __ bind(&miss);
// state where the state object is an AllocationSite object.
__ bind(&check_array);
__ ldr(r5, FieldMemOperand(r4, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(eq, &done);
// Array cache: Uninitialized or premonomorphic -> monomorphic.
__ CompareRoot(r4, kUninitializedRootIndex);
__ b(eq, &initialize_array);
__ CompareRoot(r4, kPremonomorphicRootIndex);
__ b(eq, &initialize_array);
// Both caches: Monomorphic -> megamorphic. The sentinel is an // A monomorphic miss (i.e, here the cache is not uninitialized) goes
// immortal immovable object (undefined) so no write-barrier is needed. // megamorphic.
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
__ b(eq, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic); __ bind(&megamorphic);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ LoadRoot(ip, kMegamorphicRootIndex); __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ jmp(&done); __ jmp(&done);
// Array cache: Uninitialized or premonomorphic -> monomorphic. // An uninitialized cache is patched with the function or sentinel to
__ bind(&initialize_array); // indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
// Make sure the function is the Array() function
__ LoadArrayFunction(r3);
__ cmp(r1, r3);
__ b(ne, &not_array_function);
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the cell
{ {
FrameScope scope(masm, StackFrame::INTERNAL); FrameScope scope(masm, StackFrame::INTERNAL);
// Arguments register must be smi-tagged to call out. // Arguments register must be smi-tagged to call out.
__ SmiTag(r0); __ SmiTag(r0);
__ Push(r3, r2, r1, r0); __ Push(r2, r1, r0);
CreateAllocationSiteStub create_stub; CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub); __ CallStub(&create_stub);
__ Pop(r3, r2, r1, r0); __ Pop(r2, r1, r0);
__ SmiUntag(r0); __ SmiUntag(r0);
} }
__ b(&done); __ b(&done);
// Non-array cache: Premonomorphic -> monomorphic. __ bind(&not_array_function);
__ bind(&initialize_non_array); __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); // No need for a write barrier here - cells are rescanned.
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r1, MemOperand(r4, 0));
__ Push(r4, r2, r1);
__ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Pop(r4, r2, r1);
__ bind(&done); __ bind(&done);
} }
@@ -3108,8 +3087,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
void CallFunctionStub::Generate(MacroAssembler* masm) { void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call // r1 : the function to call
// r2 : feedback vector // r2 : cache cell for call target
// r3 : (only if r2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function, wrap, cont; Label slow, non_function, wrap, cont;
if (NeedsChecks()) { if (NeedsChecks()) {
@@ -3118,7 +3096,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(r1, &non_function); __ JumpIfSmi(r1, &non_function);
// Goto slow case if we do not have a function. // Goto slow case if we do not have a function.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, &slow); __ b(ne, &slow);
if (RecordCallTarget()) { if (RecordCallTarget()) {
@@ -3166,14 +3144,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// If there is a call target cache, mark it megamorphic in the // If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable // non-function case. MegamorphicSentinel is an immortal immovable
// object (undefined) so no write barrier is needed. // object (undefined) so no write barrier is needed.
ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->undefined_value()); masm->isolate()->heap()->undefined_value());
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize)); __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
} }
// Check for function proxy. // Check for function proxy.
__ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function); __ b(ne, &non_function);
__ push(r1); // put proxy as additional argument __ push(r1); // put proxy as additional argument
__ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32)); __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
@@ -3213,14 +3190,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) { void CallConstructStub::Generate(MacroAssembler* masm) {
// r0 : number of arguments // r0 : number of arguments
// r1 : the function to call // r1 : the function to call
// r2 : feedback vector // r2 : cache cell for call target
// r3 : (only if r2 is not undefined) slot in feedback vector (Smi)
Label slow, non_function_call; Label slow, non_function_call;
// Check that the function is not a smi. // Check that the function is not a smi.
__ JumpIfSmi(r1, &non_function_call); __ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction. // Check that the function is a JSFunction.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, &slow); __ b(ne, &slow);
if (RecordCallTarget()) { if (RecordCallTarget()) {
@@ -3228,7 +3204,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
} }
// Jump to the function-specific construct stub. // Jump to the function-specific construct stub.
Register jmp_reg = r4; Register jmp_reg = r3;
__ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(jmp_reg, FieldMemOperand(jmp_reg, __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset)); SharedFunctionInfo::kConstructStubOffset));
@@ -3236,10 +3212,10 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r0: number of arguments // r0: number of arguments
// r1: called object // r1: called object
// r4: object type // r3: object type
Label do_call; Label do_call;
__ bind(&slow); __ bind(&slow);
__ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function_call); __ b(ne, &non_function_call);
__ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
__ jmp(&do_call); __ jmp(&do_call);
@@ -5199,7 +5175,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub); __ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) { } else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey. // We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot). // Fix kind and retry (only if we have an allocation site in the cell).
__ add(r3, r3, Operand(1)); __ add(r3, r3, Operand(1));
if (FLAG_debug_code) { if (FLAG_debug_code) {
@@ -5307,8 +5283,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY) // -- r0 : argc (only if argument_count_ == ANY)
// -- r1 : constructor // -- r1 : constructor
// -- r2 : feedback vector (fixed array or undefined) // -- r2 : type info cell
// -- r3 : slot index (if r2 is fixed array)
// -- sp[0] : return address // -- sp[0] : return address
// -- sp[4] : last argument // -- sp[4] : last argument
// ----------------------------------- // -----------------------------------
@@ -5317,25 +5292,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps. // builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map. // Initial map for the builtin Array function should be a map.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi. // Will both indicate a NULL and a Smi.
__ tst(r4, Operand(kSmiTagMask)); __ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction); __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE); __ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction); __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in ebx or a valid fixed array. // We should either have undefined in ebx or a valid cell
Label okay_here; Label okay_here;
Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map(); Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex); __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &okay_here); __ b(eq, &okay_here);
__ ldr(r4, FieldMemOperand(r2, 0)); __ ldr(r3, FieldMemOperand(r2, 0));
__ cmp(r4, Operand(fixed_array_map)); __ cmp(r3, Operand(cell_map));
__ Assert(eq, kExpectedFixedArrayInRegisterR2); __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
// r3 should be a smi if we don't have undefined in r2
__ AssertSmi(r3);
__ bind(&okay_here); __ bind(&okay_here);
} }
@@ -5343,10 +5314,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that. // Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex); __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info); __ b(eq, &no_info);
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3)); __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r2, FieldMemOperand(r2, FixedArray::kHeaderSize));
// If the feedback vector is undefined, or contains anything other than an // If the type cell is undefined, or contains anything other than an
// AllocationSite, call an array constructor that doesn't use AllocationSites. // AllocationSite, call an array constructor that doesn't use AllocationSites.
__ ldr(r4, FieldMemOperand(r2, 0)); __ ldr(r4, FieldMemOperand(r2, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex); __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
@@ -5459,7 +5429,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = cp; Register context = cp;
int argc = ArgumentBits::decode(bit_field_); int argc = ArgumentBits::decode(bit_field_);
bool is_store = IsStoreBits::decode(bit_field_); bool restore_context = RestoreContextBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_); bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA; typedef FunctionCallbackArguments FCA;
@@ -5537,20 +5507,15 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
AllowExternalCallThatCantCauseGC scope(masm); AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand( MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize); fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument MemOperand return_value_operand(fp,
int return_value_offset = 0; (2 + FCA::kReturnValueOffset) * kPointerSize);
if (is_store) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
__ CallApiFunctionAndReturn(api_function_address, __ CallApiFunctionAndReturn(api_function_address,
thunk_ref, thunk_ref,
kStackUnwindSpace, kStackUnwindSpace,
return_value_operand, return_value_operand,
&context_restore_operand); restore_context ?
&context_restore_operand : NULL);
} }

10
deps/v8/src/arm/debug-arm.cc

@@ -265,10 +265,9 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
   // Register state for CallFunctionStub (from code-stubs-arm.cc).
   // ----------- S t a t e -------------
   //  -- r1 : function
-  //  -- r2 : feedback array
-  //  -- r3 : slot in feedback array
+  //  -- r2 : cache cell for call target
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), 0);
+  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
 }
 
 
@@ -287,10 +286,9 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0 : number of arguments (not smi)
   //  -- r1 : constructor function
-  //  -- r2 : feedback array
-  //  -- r3 : feedback slot (smi)
+  //  -- r2 : cache cell for call target
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit() | r3.bit(), r0.bit());
+  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
 }
 
 

167
deps/v8/src/arm/full-codegen-arm.cc

@@ -130,9 +130,6 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_; CompilationInfo* info = info_;
handler_table_ = handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
InitializeFeedbackVector();
profiling_counter_ = isolate()->factory()->NewCell( profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function()); SetFunctionPosition(function());
@@ -671,7 +668,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false, Label* if_false,
Label* fall_through) { Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate()); Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id()); CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
__ tst(result_register(), result_register()); __ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through); Split(ne, if_true, if_false, fall_through);
} }
@@ -1032,7 +1029,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback. // Record position before stub call for type feedback.
SetSourcePosition(clause->position()); SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, clause->CompareId()); CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
Label skip; Label skip;
@@ -1077,7 +1074,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement"); Comment cmnt(masm_, "[ ForInStatement");
int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt); SetStatementPosition(stmt);
Label loop, exit; Label loop, exit;
@@ -1167,13 +1163,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy; Label non_proxy;
__ bind(&fixed_array); __ bind(&fixed_array);
Handle<Object> feedback = Handle<Object>( Handle<Cell> cell = isolate()->factory()->NewCell(
Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
isolate()); isolate()));
StoreFeedbackVectorSlot(slot, feedback); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ Move(r1, FeedbackVector()); __ Move(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot))); __ str(r2, FieldMemOperand(r1, Cell::kValueOffset));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
@@ -1482,7 +1478,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables. // variables.
switch (var->location()) { switch (var->location()) {
case Variable::UNALLOCATED: { case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable"); Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global // Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0. // object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand()); __ ldr(r0, GlobalObjectOperand());
@@ -1495,8 +1491,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER: case Variable::PARAMETER:
case Variable::LOCAL: case Variable::LOCAL:
case Variable::CONTEXT: { case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable" Comment cmnt(masm_, var->IsContextSlot()
: "[ Stack variable"); ? "Context variable"
: "Stack variable");
if (var->binding_needs_init()) { if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and // var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are // refers to a potential outside binding. Currently those bindings are
@@ -1559,12 +1556,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
} }
case Variable::LOOKUP: { case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Lookup variable");
Label done, slow; Label done, slow;
// Generate code for loading from variables potentially shadowed // Generate code for loading from variables potentially shadowed
// by eval-introduced variables. // by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow); __ bind(&slow);
Comment cmnt(masm_, "Lookup variable");
__ mov(r1, Operand(var->name())); __ mov(r1, Operand(var->name()));
__ Push(cp, r1); // Context and name. __ Push(cp, r1); // Context and name.
__ CallRuntime(Runtime::kLoadContextSlot, 2); __ CallRuntime(Runtime::kLoadContextSlot, 2);
@@ -1695,7 +1692,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value); VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->value())); __ mov(r2, Operand(key->value()));
__ ldr(r1, MemOperand(sp)); __ ldr(r1, MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId()); CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS); PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else { } else {
VisitForEffect(value); VisitForEffect(value);
@@ -2097,7 +2094,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(r1, MemOperand(sp, kPointerSize)); __ ldr(r1, MemOperand(sp, kPointerSize));
__ ldr(r0, MemOperand(sp, 2 * kPointerSize)); __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallIC(ic, TypeFeedbackId::None()); CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
__ mov(r1, r0); __ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize)); __ str(r1, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub stub(1, CALL_AS_METHOD); CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2294,7 +2291,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position()); SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1. // Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
CallIC(ic, prop->PropertyFeedbackId()); CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
} }
@@ -2321,7 +2318,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call); __ bind(&stub_call);
BinaryOpICStub stub(op, mode); BinaryOpICStub stub(op, mode);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
__ jmp(&done); __ jmp(&done);
@@ -2398,7 +2396,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1); __ pop(r1);
BinaryOpICStub stub(op, mode); BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
context()->Plug(r0); context()->Plug(r0);
} }
@@ -2436,7 +2435,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ mov(r1, r0); __ mov(r1, r0);
__ pop(r0); // Restore value. __ pop(r0); // Restore value.
__ mov(r2, Operand(prop->key()->AsLiteral()->value())); __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
CallStoreIC(); CallStoreIC(NOT_CONTEXTUAL);
break; break;
} }
case KEYED_PROPERTY: { case KEYED_PROPERTY: {
@@ -2456,60 +2455,41 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
} }
void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
Variable* var, MemOperand location) {
__ str(result_register(), location);
if (var->IsContextSlot()) {
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
void FullCodeGenerator::EmitCallStoreContextSlot(
Handle<String> name, LanguageMode mode) {
__ push(r0); // Value.
__ mov(r1, Operand(name));
__ mov(r0, Operand(Smi::FromInt(mode)));
__ Push(cp, r1, r0); // Context, name, strict mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) { Token::Value op) {
if (var->IsUnallocated()) { if (var->IsUnallocated()) {
// Global var, const, or let. // Global var, const, or let.
__ mov(r2, Operand(var->name())); __ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand()); __ ldr(r1, GlobalObjectOperand());
CallStoreIC(); CallStoreIC(CONTEXTUAL);
} else if (op == Token::INIT_CONST) { } else if (op == Token::INIT_CONST) {
// Const initializers need a write barrier. // Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters. ASSERT(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) { if (var->IsStackLocal()) {
__ ldr(r1, StackOperand(var));
__ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
__ str(result_register(), StackOperand(var), eq);
} else {
ASSERT(var->IsContextSlot() || var->IsLookupSlot());
// Like var declarations, const declarations are hoisted to function
// scope. However, unlike var initializers, const initializers are
// able to drill a hole to that function context, even from inside a
// 'with' context. We thus bypass the normal static scope lookup for
// var->IsContextSlot().
__ push(r0); __ push(r0);
__ mov(r0, Operand(var->name())); __ mov(r0, Operand(var->name()));
__ Push(cp, r0); // Context and name. __ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3); __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, r1);
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ b(ne, &skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
} }
} else if (var->mode() == LET && op != Token::INIT_LET) { } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier. // Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) { if (var->IsLookupSlot()) {
EmitCallStoreContextSlot(var->name(), language_mode()); __ push(r0); // Value.
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0); // Context, name, strict mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} else { } else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot()); ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign; Label assign;
@ -2522,16 +2502,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1); __ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment. // Perform the assignment.
__ bind(&assign); __ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location); __ str(result_register(), location);
if (var->IsContextSlot()) {
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
} }
} else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) { } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
// Assignment to var or initializing assignment to let/const // Assignment to var or initializing assignment to let/const
// in harmony mode. // in harmony mode.
if (var->IsLookupSlot()) { if (var->IsStackAllocated() || var->IsContextSlot()) {
EmitCallStoreContextSlot(var->name(), language_mode());
} else {
ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1); MemOperand location = VarOperand(var, r1);
if (generate_debug_code_ && op == Token::INIT_LET) { if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding. // Check for an uninitialized let binding.
@ -2539,7 +2523,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex); __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization); __ Check(eq, kLetBindingReInitialization);
} }
EmitStoreToStackLocalOrContextSlot(var, location); // Perform the assignment.
__ str(r0, location);
if (var->IsContextSlot()) {
__ mov(r3, r0);
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
}
} else {
ASSERT(var->IsLookupSlot());
__ push(r0); // Value.
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(language_mode())));
__ Push(cp, r1, r0); // Context, name, strict mode.
__ CallRuntime(Runtime::kStoreContextSlot, 4);
} }
} }
// Non-initializing assignments to consts are ignored. // Non-initializing assignments to consts are ignored.
@ -2557,7 +2555,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(r2, Operand(prop->key()->AsLiteral()->value())); __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1); __ pop(r1);
CallStoreIC(expr->AssignmentFeedbackId()); CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0); context()->Plug(r0);
@ -2574,7 +2572,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_classic_mode() Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize() ? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, expr->AssignmentFeedbackId()); CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0); context()->Plug(r0);
@ -2601,10 +2599,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code, void FullCodeGenerator::CallIC(Handle<Code> code,
ContextualMode mode,
TypeFeedbackId ast_id) { TypeFeedbackId ast_id) {
ic_total_count_++; ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that // All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly. // the debugger can patch them correctly.
ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ Call(code, RelocInfo::CODE_TARGET, ast_id, al, __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
NEVER_INLINE_TARGET_ADDRESS); NEVER_INLINE_TARGET_ADDRESS);
} }
@ -2716,15 +2716,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
Handle<Object> uninitialized = Handle<Object> uninitialized =
TypeFeedbackInfo::UninitializedSentinel(isolate()); TypeFeedbackCells::UninitializedSentinel(isolate());
StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
__ Move(r2, FeedbackVector()); RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
__ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); __ mov(r2, Operand(cell));
// Record call targets in unoptimized code. // Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub); __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr); RecordJSReturnSite(expr);
// Restore context register. // Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2905,10 +2905,10 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code. // Record call targets in unoptimized code.
Handle<Object> uninitialized = Handle<Object> uninitialized =
TypeFeedbackInfo::UninitializedSentinel(isolate()); TypeFeedbackCells::UninitializedSentinel(isolate());
StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
__ Move(r2, FeedbackVector()); RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
__ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot()))); __ mov(r2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET); CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@ -4411,7 +4411,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); CallIC(stub.GetCode(isolate()),
NOT_CONTEXTUAL,
expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
__ bind(&done); __ bind(&done);
@ -4440,7 +4442,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: { case NAMED_PROPERTY: {
__ mov(r2, Operand(prop->key()->AsLiteral()->value())); __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
__ pop(r1); __ pop(r1);
CallStoreIC(expr->CountStoreFeedbackId()); CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) { if (expr->is_postfix()) {
if (!context()->IsEffect()) { if (!context()->IsEffect()) {
@ -4456,7 +4458,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_classic_mode() Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize() ? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, expr->CountStoreFeedbackId()); CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) { if (expr->is_postfix()) {
if (!context()->IsEffect()) { if (!context()->IsEffect()) {
@ -4476,7 +4478,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest()); ASSERT(!context()->IsTest());
VariableProxy* proxy = expr->AsVariableProxy(); VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) { if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable"); Comment cmnt(masm_, "Global variable");
__ ldr(r0, GlobalObjectOperand()); __ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(proxy->name())); __ mov(r2, Operand(proxy->name()));
// Use a regular load, not a contextual load, to avoid a reference // Use a regular load, not a contextual load, to avoid a reference
@ -4485,7 +4487,6 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG); PrepareForBailout(expr, TOS_REG);
context()->Plug(r0); context()->Plug(r0);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) { } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
Comment cmnt(masm_, "[ Lookup slot");
Label done, slow; Label done, slow;
// Generate code for loading from variables potentially shadowed // Generate code for loading from variables potentially shadowed
@ -4647,7 +4648,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC. // Record position and call the compare IC.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, expr->CompareOperationFeedbackId()); CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
__ cmp(r0, Operand::Zero()); __ cmp(r0, Operand::Zero());
@ -4682,7 +4683,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, if_true, if_false, fall_through); Split(eq, if_true, if_false, fall_through);
} else { } else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil); Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId()); CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0)); __ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through); Split(ne, if_true, if_false, fall_through);
} }
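The const-initialization paths in EmitVariableAssignment above all gate the store on the slot still holding the hole sentinel, either with an inline CompareRoot against kTheHoleValueRootIndex or by deferring to the runtime. A minimal standalone sketch of that initialize-once idea, using std::optional as a stand-in for V8's hole value (none of the names below are V8's):

```cpp
#include <optional>
#include <string>

// Stand-in for a stack or context slot: std::nullopt plays the role of
// "the hole", i.e. a binding that is declared but not yet initialized.
template <typename T>
void InitializeConstSlot(std::optional<T>* slot, const T& value) {
  if (!slot->has_value()) {   // slot still holds the hole
    *slot = value;            // first initialization wins...
  }                           // ...later writes are silently ignored
}

int main() {
  std::optional<std::string> slot;                     // declared, hole-initialized
  InitializeConstSlot(&slot, std::string("first"));
  InitializeConstSlot(&slot, std::string("second"));   // ignored
  return (slot && *slot == "first") ? 0 : 1;
}
```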

14
deps/v8/src/arm/ic-arm.cc

@ -333,7 +333,8 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
} }
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
ExtraICState extra_state) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- r2 : name // -- r2 : name
// -- lr : return address // -- lr : return address
@ -341,7 +342,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------------------------------- // -----------------------------------
// Probe the stub cache. // Probe the stub cache.
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC); Code::Flags flags = Code::ComputeFlags(
Code::HANDLER, MONOMORPHIC, extra_state,
Code::NORMAL, Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe( masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6); masm, flags, r0, r2, r3, r4, r5, r6);
@ -1159,7 +1162,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
} }
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
ExtraICState extra_ic_state) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- r0 : value // -- r0 : value
// -- r1 : receiver // -- r1 : receiver
@ -1168,7 +1172,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------------------------------- // -----------------------------------
// Get the receiver from the stack and probe the stub cache. // Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC); Code::Flags flags = Code::ComputeFlags(
Code::HANDLER, MONOMORPHIC, extra_ic_state,
Code::NORMAL, Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe( masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6); masm, flags, r1, r2, r3, r4, r5, r6);
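GenerateMegamorphic above probes the isolate's stub cache, a fixed-size hash table keyed on the receiver's map and the property name, to find a previously compiled handler before falling back to the generic miss path. A toy sketch of that lookup idea (table size, hash mixing, and names here are illustrative assumptions, not V8's actual scheme):

```cpp
#include <cstddef>
#include <cstdint>

// Toy stub cache: each entry caches the handler found for a
// (receiver map, property name) pair so megamorphic call sites can skip
// the generic lookup.
struct Entry {
  const void* map;
  const void* name;
  const void* handler;
};

constexpr size_t kTableSize = 1 << 10;   // must be a power of two
Entry table[kTableSize];

size_t ProbeIndex(const void* map, const void* name) {
  uintptr_t h = reinterpret_cast<uintptr_t>(map) ^
                (reinterpret_cast<uintptr_t>(name) >> 2);
  return h & (kTableSize - 1);
}

const void* Probe(const void* map, const void* name) {
  const Entry& e = table[ProbeIndex(map, name)];
  // Hit only if both key components match; otherwise take the IC miss path
  // (signalled by nullptr here).
  return (e.map == map && e.name == name) ? e.handler : nullptr;
}
```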

3
deps/v8/src/arm/lithium-arm.cc

@ -840,6 +840,7 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
void LChunkBuilder::VisitInstruction(HInstruction* current) { void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_; HInstruction* old_current = current_instruction_;
current_instruction_ = current; current_instruction_ = current;
if (current->has_position()) position_ = current->position();
LInstruction* instr = NULL; LInstruction* instr = NULL;
if (current->CanReplaceWithDummyUses()) { if (current->CanReplaceWithDummyUses()) {
@ -1236,7 +1237,7 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
ASSERT(instr->right()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) { if (instr->RightIsPowerOf2()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegister(instr->left()); LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL); LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL);
return AssignEnvironment(DefineAsRegister(div)); return AssignEnvironment(DefineAsRegister(div));
} }

2
deps/v8/src/arm/lithium-arm.h

@ -2580,6 +2580,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
current_block_(NULL), current_block_(NULL),
next_block_(NULL), next_block_(NULL),
allocator_(allocator), allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL), instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(BailoutId::None()) { } pending_deoptimization_ast_id_(BailoutId::None()) { }
@ -2716,6 +2717,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HBasicBlock* current_block_; HBasicBlock* current_block_;
HBasicBlock* next_block_; HBasicBlock* next_block_;
LAllocator* allocator_; LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_; LInstruction* instruction_pending_deoptimization_environment_;
BailoutId pending_deoptimization_ast_id_; BailoutId pending_deoptimization_ast_id_;

100
deps/v8/src/arm/lithium-codegen-arm.cc

@ -277,8 +277,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value = HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value(); instructions_->at(code->instruction_index())->hydrogen_value();
RecordAndWritePosition( RecordAndWritePosition(value->position());
chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> " Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------", "-------------------- Deferred %s --------------------",
@ -907,7 +906,6 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory()); translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations); data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
Handle<FixedArray> literals = Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@ -1343,45 +1341,54 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
void LCodeGen::DoDivI(LDivI* instr) { void LCodeGen::DoDivI(LDivI* instr) {
if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) { if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
Register dividend = ToRegister(instr->left()); const Register dividend = ToRegister(instr->left());
HDiv* hdiv = instr->hydrogen(); const Register result = ToRegister(instr->result());
int32_t divisor = hdiv->right()->GetInteger32Constant(); int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
Register result = ToRegister(instr->result()); int32_t test_value = 0;
ASSERT(!result.is(dividend)); int32_t power = 0;
// Check for (0 / -x) that will produce negative zero. if (divisor > 0) {
if (hdiv->left()->RangeCanInclude(0) && divisor < 0 && test_value = divisor - 1;
hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { power = WhichPowerOf2(divisor);
__ cmp(dividend, Operand::Zero()); } else {
DeoptimizeIf(eq, instr->environment()); // Check for (0 / -x) that will produce negative zero.
} if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for (kMinInt / -1). __ cmp(dividend, Operand::Zero());
if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 && DeoptimizeIf(eq, instr->environment());
hdiv->CheckFlag(HValue::kCanOverflow)) { }
__ cmp(dividend, Operand(kMinInt)); // Check for (kMinInt / -1).
DeoptimizeIf(eq, instr->environment()); if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
} __ cmp(dividend, Operand(kMinInt));
// Deoptimize if remainder will not be 0. DeoptimizeIf(eq, instr->environment());
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && }
Abs(divisor) != 1) { test_value = - divisor - 1;
__ tst(dividend, Operand(Abs(divisor) - 1)); power = WhichPowerOf2(-divisor);
DeoptimizeIf(ne, instr->environment());
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
__ rsb(result, dividend, Operand(0));
return;
} }
int32_t shift = WhichPowerOf2(Abs(divisor));
if (shift == 0) { if (test_value != 0) {
__ mov(result, dividend); if (instr->hydrogen()->CheckFlag(
} else if (shift == 1) { HInstruction::kAllUsesTruncatingToInt32)) {
__ add(result, dividend, Operand(dividend, LSR, 31)); __ sub(result, dividend, Operand::Zero(), SetCC);
__ rsb(result, result, Operand::Zero(), LeaveCC, lt);
__ mov(result, Operand(result, ASR, power));
if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
return; // Don't fall through to "__ rsb" below.
} else {
// Deoptimize if remainder is not 0.
__ tst(dividend, Operand(test_value));
DeoptimizeIf(ne, instr->environment());
__ mov(result, Operand(dividend, ASR, power));
if (divisor < 0) __ rsb(result, result, Operand(0));
}
} else { } else {
__ mov(result, Operand(dividend, ASR, 31)); if (divisor < 0) {
__ add(result, dividend, Operand(result, LSR, 32 - shift)); __ rsb(result, dividend, Operand(0));
} else {
__ Move(result, dividend);
}
} }
if (shift > 0) __ mov(result, Operand(result, ASR, shift));
if (divisor < 0) __ rsb(result, result, Operand(0));
return; return;
} }
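Both versions of the DoDivI power-of-two path above emit the classic shift-based sequence: bias negative dividends so the arithmetic shift truncates toward zero, then negate when the divisor is negative, deoptimizing on the kMinInt / -1 and inexact-remainder cases. A standalone C++ sketch of the arithmetic the generated ARM code performs (not V8 code; assumes an arithmetic right shift for negative values and a divisor other than kMinInt):

```cpp
#include <cassert>
#include <cstdint>

// Divide by a power-of-two divisor without a hardware divide, matching C's
// truncation-toward-zero semantics. kMinInt / -1 is left out, which is the
// case the generated code deoptimizes on.
int32_t DivideByPowerOfTwo(int32_t dividend, int32_t divisor) {
  int32_t abs_div = divisor < 0 ? -divisor : divisor;
  assert(abs_div > 0 && (abs_div & (abs_div - 1)) == 0);   // power of two
  int shift = 0;
  while ((1 << shift) != abs_div) ++shift;                 // WhichPowerOf2(abs_div)
  // Add abs_div - 1 to negative dividends so the shift rounds toward zero.
  int32_t bias = dividend < 0 ? abs_div - 1 : 0;
  int32_t result = (dividend + bias) >> shift;
  return divisor < 0 ? -result : result;
}
```

For example, DivideByPowerOfTwo(-7, 4) biases -7 to -4, shifts to -1, and -7 / 4 is indeed -1 under truncating division.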
@ -4044,18 +4051,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
} }
Handle<Map> transition = instr->transition(); Handle<Map> transition = instr->transition();
SmiCheck check_needed =
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
Register value = ToRegister(instr->value()); Register value = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) { if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ SmiTst(value); __ SmiTst(value);
DeoptimizeIf(eq, instr->environment()); DeoptimizeIf(eq, instr->environment());
// We know that value is a smi now, so we can omit the check below.
check_needed = OMIT_SMI_CHECK;
} }
} else if (representation.IsDouble()) { } else if (representation.IsDouble()) {
ASSERT(transition.is_null()); ASSERT(transition.is_null());
@ -4085,6 +4086,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
// Do the store. // Do the store.
Register value = ToRegister(instr->value()); Register value = ToRegister(instr->value());
SmiCheck check_needed =
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
if (access.IsInobject()) { if (access.IsInobject()) {
MemOperand operand = FieldMemOperand(object, offset); MemOperand operand = FieldMemOperand(object, offset);
__ Store(value, operand, representation); __ Store(value, operand, representation);
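The DoStoreNamedField hunk above is about when the store needs a smi check ahead of its write barrier: if the value is statically known to be a heap object, the inline check can be omitted. A standalone illustration of the tagging test involved, assuming V8's usual scheme where a cleared low bit marks a small integer (the helper names are made up):

```cpp
#include <cstdint>

constexpr intptr_t kSmiTagMask = 1;   // assumption: 1-bit tag, smi tag == 0

inline bool IsSmi(intptr_t tagged_value) {
  return (tagged_value & kSmiTagMask) == 0;
}

// A write barrier only matters when a heap pointer is stored. When the
// compiler has already proved the value is a heap object, the IsSmi test
// can be skipped entirely (OMIT_SMI_CHECK in the hunk above).
inline bool NeedsWriteBarrier(intptr_t tagged_value, bool known_heap_object) {
  return known_heap_object || !IsSmi(tagged_value);
}
```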
@ -5245,7 +5249,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->size()->IsConstantOperand()) { if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); if (size <= Page::kMaxRegularHeapObjectSize) {
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
__ jmp(deferred->entry());
}
} else { } else {
Register size = ToRegister(instr->size()); Register size = ToRegister(instr->size());
__ Allocate(size, __ Allocate(size,

21
deps/v8/src/arm/macro-assembler-arm.cc

@ -2806,8 +2806,16 @@ void MacroAssembler::Check(Condition cond, BailoutReason reason) {
void MacroAssembler::Abort(BailoutReason reason) { void MacroAssembler::Abort(BailoutReason reason) {
Label abort_start; Label abort_start;
bind(&abort_start); bind(&abort_start);
#ifdef DEBUG // We want to pass the msg string like a smi to avoid GC
// problems; however, msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
const char* msg = GetBailoutReason(reason); const char* msg = GetBailoutReason(reason);
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
if (msg != NULL) { if (msg != NULL) {
RecordComment("Abort message: "); RecordComment("Abort message: ");
RecordComment(msg); RecordComment(msg);
@ -2819,24 +2827,25 @@ void MacroAssembler::Abort(BailoutReason reason) {
} }
#endif #endif
mov(r0, Operand(Smi::FromInt(reason))); mov(r0, Operand(p0));
push(r0);
mov(r0, Operand(Smi::FromInt(p1 - p0)));
push(r0); push(r0);
// Disable stub call restrictions to always allow calls to abort. // Disable stub call restrictions to always allow calls to abort.
if (!has_frame_) { if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just // We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one. // claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE); FrameScope scope(this, StackFrame::NONE);
CallRuntime(Runtime::kAbort, 1); CallRuntime(Runtime::kAbort, 2);
} else { } else {
CallRuntime(Runtime::kAbort, 1); CallRuntime(Runtime::kAbort, 2);
} }
// will not return here // will not return here
if (is_const_pool_blocked()) { if (is_const_pool_blocked()) {
// If the calling code cares about the exact number of // If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size // instructions generated, we insert padding here to keep the size
// of the Abort macro constant. // of the Abort macro constant.
static const int kExpectedAbortInstructions = 7; static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start); int abort_instructions = InstructionsGeneratedSince(&abort_start);
ASSERT(abort_instructions <= kExpectedAbortInstructions); ASSERT(abort_instructions <= kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) { while (abort_instructions++ < kExpectedAbortInstructions) {

4
deps/v8/src/arm/simulator-arm.h

@ -207,10 +207,6 @@ class Simulator {
void set_pc(int32_t value); void set_pc(int32_t value);
int32_t get_pc() const; int32_t get_pc() const;
Address get_sp() {
return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
}
// Accessor to the internal simulator stack area. // Accessor to the internal simulator stack area.
uintptr_t StackLimit() const; uintptr_t StackLimit() const;

63
deps/v8/src/arm/stub-cache-arm.cc

@ -783,14 +783,13 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function. // Generate call to api function.
void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, static void GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization, const CallOptimization& optimization,
Handle<Map> receiver_map, Handle<Map> receiver_map,
Register receiver, Register receiver,
Register scratch_in, Register scratch_in,
bool is_store, int argc,
int argc, Register* values) {
Register* values) {
ASSERT(!receiver.is(scratch_in)); ASSERT(!receiver.is(scratch_in));
__ push(receiver); __ push(receiver);
// Write the arguments to stack frame. // Write the arguments to stack frame.
@ -855,7 +854,7 @@ void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
__ mov(api_function_address, Operand(ref)); __ mov(api_function_address, Operand(ref));
// Jump to stub. // Jump to stub.
CallApiFunctionStub stub(is_store, call_data_undefined, argc); CallApiFunctionStub stub(true, call_data_undefined, argc);
__ TailCallStub(&stub); __ TailCallStub(&stub);
} }
@ -1076,6 +1075,15 @@ void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
} }
void LoadStubCompiler::GenerateLoadCallback(
const CallOptimization& call_optimization,
Handle<Map> receiver_map) {
GenerateFastApiCall(
masm(), call_optimization, receiver_map,
receiver(), scratch3(), 0, NULL);
}
void LoadStubCompiler::GenerateLoadCallback( void LoadStubCompiler::GenerateLoadCallback(
Register reg, Register reg,
Handle<ExecutableAccessorInfo> callback) { Handle<ExecutableAccessorInfo> callback) {
@ -1252,6 +1260,24 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
} }
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
const CallOptimization& call_optimization) {
HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
receiver(), holder, name);
Register values[] = { value() };
GenerateFastApiCall(
masm(), call_optimization, handle(object->map()),
receiver(), scratch3(), 1, values);
// Return the generated code.
return GetCode(kind(), Code::FAST, name);
}
#undef __ #undef __
#define __ ACCESS_MASM(masm) #define __ ACCESS_MASM(masm)
@ -1310,6 +1336,21 @@ void StoreStubCompiler::GenerateStoreViaSetter(
Handle<Code> StoreStubCompiler::CompileStoreInterceptor( Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object, Handle<JSObject> object,
Handle<Name> name) { Handle<Name> name) {
Label miss;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
}
// Stub is never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
__ Push(receiver(), this->name(), value()); __ Push(receiver(), this->name(), value());
// Do tail-call to the runtime system. // Do tail-call to the runtime system.
@ -1317,6 +1358,10 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 3, 1); __ TailCallExternalReference(store_ic_property, 3, 1);
// Handle store cache miss.
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code. // Return the generated code.
return GetCode(kind(), Code::FAST, name); return GetCode(kind(), Code::FAST, name);
} }

11
deps/v8/src/arraybuffer.js

@ -57,17 +57,18 @@ function ArrayBufferSlice(start, end) {
var relativeStart = TO_INTEGER(start); var relativeStart = TO_INTEGER(start);
var first; var first;
var byte_length = %ArrayBufferGetByteLength(this);
if (relativeStart < 0) { if (relativeStart < 0) {
first = MathMax(this.byteLength + relativeStart, 0); first = MathMax(byte_length + relativeStart, 0);
} else { } else {
first = MathMin(relativeStart, this.byteLength); first = MathMin(relativeStart, byte_length);
} }
var relativeEnd = IS_UNDEFINED(end) ? this.byteLength : TO_INTEGER(end); var relativeEnd = IS_UNDEFINED(end) ? byte_length : TO_INTEGER(end);
var fin; var fin;
if (relativeEnd < 0) { if (relativeEnd < 0) {
fin = MathMax(this.byteLength + relativeEnd, 0); fin = MathMax(byte_length + relativeEnd, 0);
} else { } else {
fin = MathMin(relativeEnd, this.byteLength); fin = MathMin(relativeEnd, byte_length);
} }
if (fin < first) { if (fin < first) {
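ArrayBufferSlice normalizes its start/end arguments the same way on both sides of the hunk: negative indices count back from the end of the buffer, and both ends are clamped to [0, byteLength]; the change only caches the byte length once. A small C++ sketch of that clamping (a hypothetical helper, not part of V8):

```cpp
#include <algorithm>
#include <cstdint>

// Clamp a relative index against a buffer length, ECMAScript-slice style:
// negative values count back from the end, and the result stays within
// [0, byte_length].
int64_t ClampSliceIndex(int64_t relative, int64_t byte_length) {
  return relative < 0 ? std::max<int64_t>(byte_length + relative, int64_t{0})
                      : std::min(relative, byte_length);
}

// Usage mirrors the variables in ArrayBufferSlice above:
//   int64_t first       = ClampSliceIndex(start, byte_length);
//   int64_t fin         = ClampSliceIndex(end,   byte_length);
//   int64_t copy_length = std::max<int64_t>(fin - first, int64_t{0});
```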

7
deps/v8/src/assembler.cc

@ -59,8 +59,6 @@
#include "ia32/assembler-ia32-inl.h" #include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64 #elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h" #include "x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_A64
#include "a64/assembler-a64-inl.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h" #include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS #elif V8_TARGET_ARCH_MIPS
@ -75,8 +73,6 @@
#include "ia32/regexp-macro-assembler-ia32.h" #include "ia32/regexp-macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64 #elif V8_TARGET_ARCH_X64
#include "x64/regexp-macro-assembler-x64.h" #include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/regexp-macro-assembler-a64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS #elif V8_TARGET_ARCH_MIPS
@ -126,6 +122,7 @@ AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
if (FLAG_mask_constants_with_cookie && isolate != NULL) { if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = isolate->random_number_generator()->NextInt(); jit_cookie_ = isolate->random_number_generator()->NextInt();
} }
if (buffer == NULL) { if (buffer == NULL) {
// Do our own buffer management. // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) { if (buffer_size <= kMinimalBufferSize) {
@ -1339,8 +1336,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState); function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32 #elif V8_TARGET_ARCH_IA32
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState); function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_A64
function = FUNCTION_ADDR(RegExpMacroAssemblerA64::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState); function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS #elif V8_TARGET_ARCH_MIPS

26
deps/v8/src/assembler.h

@ -1002,6 +1002,32 @@ class PreservePositionScope BASE_EMBEDDED {
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Utility functions // Utility functions
inline bool is_intn(int x, int n) {
return -(1 << (n-1)) <= x && x < (1 << (n-1));
}
inline bool is_int8(int x) { return is_intn(x, 8); }
inline bool is_int16(int x) { return is_intn(x, 16); }
inline bool is_int18(int x) { return is_intn(x, 18); }
inline bool is_int24(int x) { return is_intn(x, 24); }
inline bool is_uintn(int x, int n) {
return (x & -(1 << n)) == 0;
}
inline bool is_uint2(int x) { return is_uintn(x, 2); }
inline bool is_uint3(int x) { return is_uintn(x, 3); }
inline bool is_uint4(int x) { return is_uintn(x, 4); }
inline bool is_uint5(int x) { return is_uintn(x, 5); }
inline bool is_uint6(int x) { return is_uintn(x, 6); }
inline bool is_uint8(int x) { return is_uintn(x, 8); }
inline bool is_uint10(int x) { return is_uintn(x, 10); }
inline bool is_uint12(int x) { return is_uintn(x, 12); }
inline bool is_uint16(int x) { return is_uintn(x, 16); }
inline bool is_uint24(int x) { return is_uintn(x, 24); }
inline bool is_uint26(int x) { return is_uintn(x, 26); }
inline bool is_uint28(int x) { return is_uintn(x, 28); }
inline int NumberOfBitsSet(uint32_t x) { inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set; unsigned int num_bits_set;
for (num_bits_set = 0; x; x >>= 1) { for (num_bits_set = 0; x; x >>= 1) {
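The range helpers in the assembler.h hunk encode the usual bit-width checks: is_intn(x, n) accepts the signed n-bit range [-2^(n-1), 2^(n-1) - 1] and is_uintn(x, n) accepts [0, 2^n - 1]. A quick self-contained check of those bounds (the helpers are copied verbatim; main() is only for illustration):

```cpp
#include <cassert>

inline bool is_intn(int x, int n) {
  return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
}

inline bool is_uintn(int x, int n) {
  return (x & -(1 << n)) == 0;
}

int main() {
  // Signed 8-bit range is [-128, 127].
  assert(is_intn(-128, 8) && is_intn(127, 8) && !is_intn(128, 8));
  // Unsigned 8-bit range is [0, 255]; negative values are rejected.
  assert(is_uintn(255, 8) && !is_uintn(256, 8) && !is_uintn(-1, 8));
  return 0;
}
```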

35
deps/v8/src/ast.cc

@ -593,17 +593,6 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
} }
int Call::ComputeFeedbackSlotCount(Isolate* isolate) {
CallType call_type = GetCallType(isolate);
if (call_type == LOOKUP_SLOT_CALL || call_type == OTHER_CALL) {
// Call only uses a slot in some cases.
return 1;
}
return 0;
}
Call::CallType Call::GetCallType(Isolate* isolate) const { Call::CallType Call::GetCallType(Isolate* isolate) const {
VariableProxy* proxy = expression()->AsVariableProxy(); VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) { if (proxy != NULL) {
@ -644,10 +633,10 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) { void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
allocation_site_ = allocation_site_ =
oracle->GetCallNewAllocationSite(CallNewFeedbackSlot()); oracle->GetCallNewAllocationSite(CallNewFeedbackId());
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot()); is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackId());
if (is_monomorphic_) { if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot()); target_ = oracle->GetCallNewTarget(CallNewFeedbackId());
if (!allocation_site_.is_null()) { if (!allocation_site_.is_null()) {
elements_kind_ = allocation_site_->GetElementsKind(); elements_kind_ = allocation_site_->GetElementsKind();
} }
@ -1050,11 +1039,6 @@ CaseClause::CaseClause(Zone* zone,
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \ increase_node_count(); \
} }
#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
}
#define DONT_OPTIMIZE_NODE(NodeType) \ #define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \ increase_node_count(); \
@ -1067,12 +1051,6 @@ CaseClause::CaseClause(Zone* zone,
increase_node_count(); \ increase_node_count(); \
add_flag(kDontSelfOptimize); \ add_flag(kDontSelfOptimize); \
} }
#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
add_slot_node(node); \
add_flag(kDontSelfOptimize); \
}
#define DONT_CACHE_NODE(NodeType) \ #define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \ increase_node_count(); \
@ -1107,8 +1085,8 @@ REGULAR_NODE(CountOperation)
REGULAR_NODE(BinaryOperation) REGULAR_NODE(BinaryOperation)
REGULAR_NODE(CompareOperation) REGULAR_NODE(CompareOperation)
REGULAR_NODE(ThisFunction) REGULAR_NODE(ThisFunction)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call) REGULAR_NODE(Call)
REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew) REGULAR_NODE(CallNew)
// In theory, for VariableProxy we'd have to add: // In theory, for VariableProxy we'd have to add:
// if (node->var()->IsLookupSlot()) add_flag(kDontInline); // if (node->var()->IsLookupSlot()) add_flag(kDontInline);
// But node->var() is usually not bound yet at VariableProxy creation time, and // But node->var() is usually not bound yet at VariableProxy creation time, and
@ -1133,12 +1111,11 @@ DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement) DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement) DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement) DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement) DONT_SELFOPTIMIZE_NODE(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement) DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral) DONT_CACHE_NODE(ModuleLiteral)
void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
increase_node_count(); increase_node_count();
if (node->is_jsruntime()) { if (node->is_jsruntime()) {

79
deps/v8/src/ast.h

@ -32,7 +32,6 @@
#include "assembler.h" #include "assembler.h"
#include "factory.h" #include "factory.h"
#include "feedback-slots.h"
#include "isolate.h" #include "isolate.h"
#include "jsregexp.h" #include "jsregexp.h"
#include "list-inl.h" #include "list-inl.h"
@ -182,7 +181,7 @@ class AstProperties V8_FINAL BASE_EMBEDDED {
public: public:
class Flags : public EnumSet<AstPropertiesFlag, int> {}; class Flags : public EnumSet<AstPropertiesFlag, int> {};
AstProperties() : node_count_(0) {} AstProperties() : node_count_(0) { }
Flags* flags() { return &flags_; } Flags* flags() { return &flags_; }
int node_count() { return node_count_; } int node_count() { return node_count_; }
@ -915,8 +914,7 @@ class ForEachStatement : public IterationStatement {
}; };
class ForInStatement V8_FINAL : public ForEachStatement, class ForInStatement V8_FINAL : public ForEachStatement {
public FeedbackSlotInterface {
public: public:
DECLARE_NODE_TYPE(ForInStatement) DECLARE_NODE_TYPE(ForInStatement)
@ -924,16 +922,7 @@ class ForInStatement V8_FINAL : public ForEachStatement,
return subject(); return subject();
} }
// Type feedback information. TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
int ForInFeedbackSlot() {
ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
return for_in_feedback_slot_;
}
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN }; enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; } ForInType for_in_type() const { return for_in_type_; }
void set_for_in_type(ForInType type) { for_in_type_ = type; } void set_for_in_type(ForInType type) { for_in_type_ = type; }
@ -947,13 +936,11 @@ class ForInStatement V8_FINAL : public ForEachStatement,
ForInStatement(Zone* zone, ZoneStringList* labels, int pos) ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
: ForEachStatement(zone, labels, pos), : ForEachStatement(zone, labels, pos),
for_in_type_(SLOW_FOR_IN), for_in_type_(SLOW_FOR_IN),
for_in_feedback_slot_(kInvalidFeedbackSlot),
body_id_(GetNextId(zone)), body_id_(GetNextId(zone)),
prepare_id_(GetNextId(zone)) { prepare_id_(GetNextId(zone)) {
} }
ForInType for_in_type_; ForInType for_in_type_;
int for_in_feedback_slot_;
const BailoutId body_id_; const BailoutId body_id_;
const BailoutId prepare_id_; const BailoutId prepare_id_;
}; };
@ -1746,7 +1733,7 @@ class Property V8_FINAL : public Expression {
}; };
class Call V8_FINAL : public Expression, public FeedbackSlotInterface { class Call V8_FINAL : public Expression {
public: public:
DECLARE_NODE_TYPE(Call) DECLARE_NODE_TYPE(Call)
@ -1754,16 +1741,7 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
ZoneList<Expression*>* arguments() const { return arguments_; } ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information. // Type feedback information.
virtual ComputablePhase GetComputablePhase() { return AFTER_SCOPING; } TypeFeedbackId CallFeedbackId() const { return reuse(id()); }
virtual int ComputeFeedbackSlotCount(Isolate* isolate);
virtual void SetFirstFeedbackSlot(int slot) {
call_feedback_slot_ = slot;
}
bool HasCallFeedbackSlot() const {
return call_feedback_slot_ != kInvalidFeedbackSlot;
}
int CallFeedbackSlot() const { return call_feedback_slot_; }
virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE { virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
if (expression()->IsProperty()) { if (expression()->IsProperty()) {
@ -1812,7 +1790,6 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
: Expression(zone, pos), : Expression(zone, pos),
expression_(expression), expression_(expression),
arguments_(arguments), arguments_(arguments),
call_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) { return_id_(GetNextId(zone)) {
if (expression->IsProperty()) { if (expression->IsProperty()) {
expression->AsProperty()->mark_for_call(); expression->AsProperty()->mark_for_call();
@ -1825,13 +1802,12 @@ class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
Handle<JSFunction> target_; Handle<JSFunction> target_;
Handle<Cell> cell_; Handle<Cell> cell_;
int call_feedback_slot_;
const BailoutId return_id_; const BailoutId return_id_;
}; };
class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface { class CallNew V8_FINAL : public Expression {
public: public:
DECLARE_NODE_TYPE(CallNew) DECLARE_NODE_TYPE(CallNew)
@ -1839,17 +1815,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
ZoneList<Expression*>* arguments() const { return arguments_; } ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information. // Type feedback information.
virtual ComputablePhase GetComputablePhase() { return DURING_PARSE; }
virtual int ComputeFeedbackSlotCount(Isolate* isolate) { return 1; }
virtual void SetFirstFeedbackSlot(int slot) {
callnew_feedback_slot_ = slot;
}
int CallNewFeedbackSlot() {
ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
return callnew_feedback_slot_;
}
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); } TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle); void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; } virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
@ -1859,8 +1824,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
return allocation_site_; return allocation_site_;
} }
static int feedback_slots() { return 1; }
BailoutId ReturnId() const { return return_id_; } BailoutId ReturnId() const { return return_id_; }
protected: protected:
@ -1873,7 +1836,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
arguments_(arguments), arguments_(arguments),
is_monomorphic_(false), is_monomorphic_(false),
elements_kind_(GetInitialFastElementsKind()), elements_kind_(GetInitialFastElementsKind()),
callnew_feedback_slot_(kInvalidFeedbackSlot),
return_id_(GetNextId(zone)) { } return_id_(GetNextId(zone)) { }
private: private:
@ -1884,7 +1846,6 @@ class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
Handle<JSFunction> target_; Handle<JSFunction> target_;
ElementsKind elements_kind_; ElementsKind elements_kind_;
Handle<AllocationSite> allocation_site_; Handle<AllocationSite> allocation_site_;
int callnew_feedback_slot_;
const BailoutId return_id_; const BailoutId return_id_;
}; };
@ -2371,15 +2332,7 @@ class FunctionLiteral V8_FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) { void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties; ast_properties_ = *ast_properties;
} }
void set_slot_processor(DeferredFeedbackSlotProcessor* slot_processor) {
slot_processor_ = *slot_processor;
}
void ProcessFeedbackSlots(Isolate* isolate) {
slot_processor_.ProcessFeedbackSlots(isolate);
}
int slot_count() {
return slot_processor_.slot_count();
}
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; } bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; } BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
void set_dont_optimize_reason(BailoutReason reason) { void set_dont_optimize_reason(BailoutReason reason) {
@ -2429,7 +2382,6 @@ class FunctionLiteral V8_FINAL : public Expression {
ZoneList<Statement*>* body_; ZoneList<Statement*>* body_;
Handle<String> inferred_name_; Handle<String> inferred_name_;
AstProperties ast_properties_; AstProperties ast_properties_;
DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_; BailoutReason dont_optimize_reason_;
int materialized_literal_count_; int materialized_literal_count_;
@ -2904,13 +2856,10 @@ private: \
class AstConstructionVisitor BASE_EMBEDDED { class AstConstructionVisitor BASE_EMBEDDED {
public: public:
explicit AstConstructionVisitor(Zone* zone) AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
: dont_optimize_reason_(kNoReason),
zone_(zone) { }
AstProperties* ast_properties() { return &properties_; } AstProperties* ast_properties() { return &properties_; }
BailoutReason dont_optimize_reason() { return dont_optimize_reason_; } BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
DeferredFeedbackSlotProcessor* slot_processor() { return &slot_processor_; }
private: private:
template<class> friend class AstNodeFactory; template<class> friend class AstNodeFactory;
@ -2927,21 +2876,13 @@ class AstConstructionVisitor BASE_EMBEDDED {
dont_optimize_reason_ = reason; dont_optimize_reason_ = reason;
} }
void add_slot_node(FeedbackSlotInterface* slot_node) {
slot_processor_.add_slot_node(zone_, slot_node);
}
AstProperties properties_; AstProperties properties_;
DeferredFeedbackSlotProcessor slot_processor_;
BailoutReason dont_optimize_reason_; BailoutReason dont_optimize_reason_;
Zone* zone_;
}; };
class AstNullVisitor BASE_EMBEDDED { class AstNullVisitor BASE_EMBEDDED {
public: public:
explicit AstNullVisitor(Zone* zone) {}
// Node visitors. // Node visitors.
#define DEF_VISIT(type) \ #define DEF_VISIT(type) \
void Visit##type(type* node) {} void Visit##type(type* node) {}
@ -2957,9 +2898,7 @@ class AstNullVisitor BASE_EMBEDDED {
template<class Visitor> template<class Visitor>
class AstNodeFactory V8_FINAL BASE_EMBEDDED { class AstNodeFactory V8_FINAL BASE_EMBEDDED {
public: public:
explicit AstNodeFactory(Zone* zone) explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
: zone_(zone),
visitor_(zone) { }
Visitor* visitor() { return &visitor_; } Visitor* visitor() { return &visitor_; }

2
deps/v8/src/atomicops.h

@ -159,8 +159,6 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_macosx.h" #include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64) #elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h" #include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_A64
#include "atomicops_internals_a64_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM #elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h" #include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS #elif defined(__GNUC__) && V8_HOST_ARCH_MIPS

416
deps/v8/src/atomicops_internals_a64_gcc.h

@ -1,416 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
namespace v8 {
namespace internal {
inline void MemoryBarrier() { /* Not used. */ }
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
"clrex \n\t" // In case we didn't swap.
: [prev]"=&r" (prev),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
"stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
: [result]"=&r" (result),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
"stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
: [result]"=&r" (result),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[increment]"r" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
"0: \n\t"
"ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
"stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
"dmb ish \n\t" // Data memory barrier.
: [result]"=&r" (result),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[increment]"r" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"dmb ish \n\t" // Data memory barrier.
"1: \n\t"
// If the compare failed the 'dmb' is unnecessary, but we still need a
// 'clrex'.
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
"0: \n\t"
"ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
// If the compare failed we still need a 'clrex'.
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
::: "memory" // Prevent gcc from reordering before the store above.
); // NOLINT
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
::: "memory" // Prevent gcc from reordering after the store below.
); // NOLINT
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
::: "memory" // Prevent gcc from reordering before the load above.
); // NOLINT
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t" // Data memory barrier.
::: "memory" // Prevent gcc from reordering after the load below.
); // NOLINT
return *ptr;
}
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], [%[ptr]] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], [%[ptr]] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], [%[ptr]] \n\t"
"stxr %w[temp], %[new_value], [%[ptr]] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[new_value]"r" (new_value)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[result], [%[ptr]] \n\t"
"add %[result], %[result], %[increment] \n\t"
"stxr %w[temp], %[result], [%[ptr]] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[increment]"r" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t"
"0: \n\t"
"ldxr %[result], [%[ptr]] \n\t"
"add %[result], %[result], %[increment] \n\t"
"stxr %w[temp], %[result], [%[ptr]] \n\t"
"cbnz %w[temp], 0b \n\t"
"dmb ish \n\t"
: [result]"=&r" (result),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[increment]"r" (increment)
: "memory"
); // NOLINT
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
"ldxr %[prev], [%[ptr]] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], [%[ptr]] \n\t"
"cbnz %w[temp], 0b \n\t"
"dmb ish \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
int32_t temp;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t"
"0: \n\t"
"ldxr %[prev], [%[ptr]] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
"stxr %w[temp], %[new_value], [%[ptr]] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp)
: [ptr]"r" (ptr),
[old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
return prev;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t"
::: "memory"
); // NOLINT
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t"
::: "memory"
); // NOLINT
*ptr = value;
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t"
::: "memory"
); // NOLINT
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__asm__ __volatile__ ( // NOLINT
"dmb ish \n\t"
::: "memory"
); // NOLINT
return *ptr;
}
} } // namespace v8::internal
#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
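For reference, a minimal usage sketch of the primitives declared above, showing the intended release/acquire pairing. It assumes the declarations are reached through v8's atomicops.h; the payload/ready variables and the Producer/Consumer helpers are illustrative names, not part of the tree.
// Sketch only: pairs Release_Store with Acquire_Load from the file above.
// Atomic32 and the v8::internal atomic functions are assumed to come from
// "atomicops.h"; 'payload' and 'ready' are hypothetical example variables.
#include "atomicops.h"
namespace example {
static v8::internal::Atomic32 payload = 0;
static v8::internal::Atomic32 ready = 0;
void Producer() {
  v8::internal::NoBarrier_Store(&payload, 42);
  // Release_Store issues dmb ish before the flag store, so the payload
  // write above is visible before the flag flips.
  v8::internal::Release_Store(&ready, 1);
}
v8::internal::Atomic32 Consumer() {
  // Acquire_Load issues dmb ish after the load, so the payload read below
  // cannot be reordered ahead of observing the flag.
  while (v8::internal::Acquire_Load(&ready) == 0) {
    // Spin until the producer publishes.
  }
  return v8::internal::NoBarrier_Load(&payload);
}
}  // namespace example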

7
deps/v8/src/bootstrapper.cc

@ -1582,9 +1582,6 @@ void Genesis::InstallNativeFunctions() {
void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "RunMicrotasks", run_microtasks);
INSTALL_NATIVE(JSFunction, "EnqueueExternalMicrotask",
enqueue_external_microtask);
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
@ -2569,9 +2566,7 @@ class NoTrackDoubleFieldsForSerializerScope {
}
}
~NoTrackDoubleFieldsForSerializerScope() {
if (Serializer::enabled()) {
FLAG_track_double_fields = flag_;
FLAG_track_double_fields = flag_;
}
}
private:

8
deps/v8/src/builtins.cc

@ -1599,7 +1599,9 @@ void Builtins::InitBuiltinFunctionTable() {
functions->c_code = NULL; \
functions->s_name = #aname; \
functions->name = k##aname; \
functions->flags = Code::ComputeHandlerFlags(Code::kind); \
functions->flags = Code::ComputeFlags( \
Code::HANDLER, MONOMORPHIC, kNoExtraICState, \
Code::NORMAL, Code::kind); \
functions->extra_args = NO_EXTRA_ARGUMENTS; \
++functions;
@ -1625,9 +1627,7 @@ void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
// For now we generate builtin adaptor code into a stack-allocated
// buffer, before copying it into individual code objects. Be careful
// with alignment, some platforms don't like unaligned code.
// TODO(jbramley): I had to increase the size of this buffer from 8KB because
union { int force_alignment; byte buffer[8*KB]; } u;
// we can generate a lot of debug code on A64.
union { int force_alignment; byte buffer[16*KB]; } u;
// Traverse the list of builtins and generate an adaptor in a
// separate code object for each one.

21
deps/v8/src/char-predicates.h

@ -66,27 +66,6 @@ struct IdentifierPart {
} }
}; };
// WhiteSpace according to ECMA-262 5.1, 7.2.
struct WhiteSpace {
static inline bool Is(uc32 c) {
return c == 0x0009 || // <TAB>
c == 0x000B || // <VT>
c == 0x000C || // <FF>
c == 0xFEFF || // <BOM>
// \u0020 and \u00A0 are included in unibrow::WhiteSpace.
unibrow::WhiteSpace::Is(c);
}
};
// WhiteSpace and LineTerminator according to ECMA-262 5.1, 7.2 and 7.3.
struct WhiteSpaceOrLineTerminator {
static inline bool Is(uc32 c) {
return WhiteSpace::Is(c) || unibrow::LineTerminator::Is(c);
}
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_CHAR_PREDICATES_H_ #endif // V8_CHAR_PREDICATES_H_

18
deps/v8/src/checks.h

@ -34,7 +34,6 @@
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...); extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during // The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product. // development, but they should not be relied on in the final product.
#ifdef DEBUG #ifdef DEBUG
@ -52,23 +51,6 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#define UNREACHABLE() ((void) 0) #define UNREACHABLE() ((void) 0)
#endif #endif
// Simulator specific helpers.
#if defined(USE_SIMULATOR) && defined(V8_TARGET_ARCH_A64)
// TODO(all): If possible automatically prepend an indicator like
// UNIMPLEMENTED or LOCATION.
#define ASM_UNIMPLEMENTED(message) \
__ Debug(message, __LINE__, NO_PARAM)
#define ASM_UNIMPLEMENTED_BREAK(message) \
__ Debug(message, __LINE__, \
FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#define ASM_LOCATION(message) \
__ Debug("LOCATION: " message, __LINE__, NO_PARAM)
#else
#define ASM_UNIMPLEMENTED(message)
#define ASM_UNIMPLEMENTED_BREAK(message)
#define ASM_LOCATION(message)
#endif
// The CHECK macro checks that the given condition is true; if not, it // The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts. // prints a message to stderr and aborts.

89
deps/v8/src/code-stubs-hydrogen.cc

@ -81,11 +81,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HContext* context() { return context_; } HContext* context() { return context_; }
Isolate* isolate() { return info_.isolate(); } Isolate* isolate() { return info_.isolate(); }
HLoadNamedField* BuildLoadNamedField(HValue* object,
Representation representation,
int offset,
bool is_inobject);
enum ArgumentClass { enum ArgumentClass {
NONE, NONE,
SINGLE, SINGLE,
@ -252,7 +247,8 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) {
GetCodeKind(), GetCodeKind(),
GetICState(), GetICState(),
GetExtraICState(), GetExtraICState(),
GetStubType()); GetStubType(),
GetStubFlags());
Handle<Code> new_object = factory->NewCode( Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode()); desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object; return new_object;
@ -299,8 +295,7 @@ HValue* CodeStubGraphBuilder<ToNumberStub>::BuildCodeStub() {
// Check if the parameter is already a SMI or heap number. // Check if the parameter is already a SMI or heap number.
IfBuilder if_number(this); IfBuilder if_number(this);
if_number.If<HIsSmiAndBranch>(value); if_number.If<HIsSmiAndBranch>(value);
if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map(), if_number.OrIf<HCompareMap>(value, isolate()->factory()->heap_number_map());
top_info());
if_number.Then(); if_number.Then();
// Return the number. // Return the number.
@ -363,8 +358,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
HValue* elements = AddLoadElements(boilerplate); HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this); IfBuilder if_fixed_cow(this);
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map(), if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
top_info());
if_fixed_cow.Then(); if_fixed_cow.Then();
push_value = BuildCloneShallowArray(boilerplate, push_value = BuildCloneShallowArray(boilerplate,
allocation_site, allocation_site,
@ -375,7 +369,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
if_fixed_cow.Else(); if_fixed_cow.Else();
IfBuilder if_fixed(this); IfBuilder if_fixed(this);
if_fixed.If<HCompareMap>(elements, factory->fixed_array_map(), top_info()); if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then(); if_fixed.Then();
push_value = BuildCloneShallowArray(boilerplate, push_value = BuildCloneShallowArray(boilerplate,
allocation_site, allocation_site,
@ -536,11 +530,15 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(), Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(),
object); object);
HInstruction* feedback_vector = GetParameter(0); // We use a hammer (SkipWriteBarrier()) to indicate that we know the input
HInstruction* slot = GetParameter(1); // cell is really a Cell, and so no write barrier is needed.
Add<HStoreKeyed>(feedback_vector, slot, object, FAST_ELEMENTS, // TODO(mvstanton): Add a debug_code check to verify the input cell is really
INITIALIZING_STORE); // a cell. (perhaps with a new instruction, HAssert).
return feedback_vector; HInstruction* cell = GetParameter(0);
HObjectAccess access = HObjectAccess::ForCellValue();
store = Add<HStoreNamedField>(cell, access, object);
store->SkipWriteBarrier();
return cell;
} }
@ -554,7 +552,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess( HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, GetParameter(0), GetParameter(1), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(), casted_stub()->is_js_array(), casted_stub()->elements_kind(),
LOAD, NEVER_RETURN_HOLE, STANDARD_STORE); false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load; return load;
} }
@ -564,32 +562,14 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode(Isolate* isolate) {
} }
HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
HValue* object,
Representation representation,
int offset,
bool is_inobject) {
HObjectAccess access = is_inobject
? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
: HObjectAccess::ForBackingStoreOffset(offset, representation);
if (representation.IsDouble()) {
// Load the heap number.
object = Add<HLoadNamedField>(
object, static_cast<HValue*>(NULL),
access.WithRepresentation(Representation::Tagged()));
// Load the double value from it.
access = HObjectAccess::ForHeapNumberValue();
}
return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
}
template<> template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() { HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
return BuildLoadNamedField(GetParameter(0), Representation rep = casted_stub()->representation();
casted_stub()->representation(), int offset = casted_stub()->offset();
casted_stub()->offset(), HObjectAccess access = casted_stub()->is_inobject() ?
casted_stub()->is_inobject()); HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
HObjectAccess::ForBackingStoreOffset(offset, rep);
return AddLoadNamedField(GetParameter(0), access);
} }
@ -600,10 +580,12 @@ Handle<Code> LoadFieldStub::GenerateCode(Isolate* isolate) {
template<> template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() { HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
return BuildLoadNamedField(GetParameter(0), Representation rep = casted_stub()->representation();
casted_stub()->representation(), int offset = casted_stub()->offset();
casted_stub()->offset(), HObjectAccess access = casted_stub()->is_inobject() ?
casted_stub()->is_inobject()); HObjectAccess::ForObservableJSObjectOffset(offset, rep) :
HObjectAccess::ForBackingStoreOffset(offset, rep);
return AddLoadNamedField(GetParameter(0), access);
} }
@ -617,7 +599,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess( BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2), GetParameter(0), GetParameter(1), GetParameter(2),
casted_stub()->is_js_array(), casted_stub()->elements_kind(), casted_stub()->is_js_array(), casted_stub()->elements_kind(),
STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode()); true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2); return GetParameter(2);
} }
@ -1051,16 +1033,13 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
Handle<PropertyCell> placeholder_cell = Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value); isolate()->factory()->NewPropertyCell(placeholer_value);
HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2); HParameter* value = GetParameter(2);
if (stub->check_global()) { // Check that the map of the global has not changed: use a placeholder map
// Check that the map of the global has not changed: use a placeholder map // that will be replaced later with the global object's map.
// that will be replaced later with the global object's map. Handle<Map> placeholder_map = isolate()->factory()->meta_map();
Handle<Map> placeholder_map = isolate()->factory()->meta_map(); Add<HCheckMaps>(receiver, placeholder_map, top_info());
HValue* global = Add<HConstant>(
StoreGlobalStub::global_placeholder(isolate()));
Add<HCheckMaps>(global, placeholder_map, top_info());
}
HValue* cell = Add<HConstant>(placeholder_cell); HValue* cell = Add<HConstant>(placeholder_cell);
HObjectAccess access(HObjectAccess::ForCellPayload(isolate())); HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
@ -1117,7 +1096,7 @@ HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(object, key, value, BuildUncheckedMonomorphicElementAccess(object, key, value,
casted_stub()->is_jsarray(), casted_stub()->is_jsarray(),
casted_stub()->to_kind(), casted_stub()->to_kind(),
STORE, ALLOW_RETURN_HOLE, true, ALLOW_RETURN_HOLE,
casted_stub()->store_mode()); casted_stub()->store_mode());
} }

3
deps/v8/src/code-stubs.cc

@ -119,7 +119,8 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) {
GetCodeKind(),
GetICState(),
GetExtraICState(),
GetStubType());
GetStubType(),
GetStubFlags());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
return new_object;

59
deps/v8/src/code-stubs.h

@ -101,7 +101,7 @@ namespace internal {
V(KeyedLoadField) V(KeyedLoadField)
// List of code stubs only used on ARM platforms. // List of code stubs only used on ARM platforms.
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_A64) #if V8_TARGET_ARCH_ARM
#define CODE_STUB_LIST_ARM(V) \ #define CODE_STUB_LIST_ARM(V) \
V(GetProperty) \ V(GetProperty) \
V(SetProperty) \ V(SetProperty) \
@ -188,6 +188,9 @@ class CodeStub BASE_EMBEDDED {
virtual Code::StubType GetStubType() { virtual Code::StubType GetStubType() {
return Code::NORMAL; return Code::NORMAL;
} }
virtual int GetStubFlags() {
return -1;
}
virtual void PrintName(StringStream* stream); virtual void PrintName(StringStream* stream);
@ -439,8 +442,6 @@ class RuntimeCallHelper {
#include "ia32/code-stubs-ia32.h" #include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64 #elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h" #include "x64/code-stubs-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/code-stubs-a64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h" #include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS #elif V8_TARGET_ARCH_MIPS
@ -882,7 +883,7 @@ class HICStub: public HydrogenCodeStub {
class HandlerStub: public HICStub { class HandlerStub: public HICStub {
public: public:
virtual Code::Kind GetCodeKind() const { return Code::HANDLER; } virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
virtual ExtraICState GetExtraICState() { return kind(); } virtual int GetStubFlags() { return kind(); }
protected: protected:
HandlerStub() : HICStub() { } HandlerStub() : HICStub() { }
@ -954,27 +955,19 @@ class LoadFieldStub: public HandlerStub {
class StoreGlobalStub : public HandlerStub { class StoreGlobalStub : public HandlerStub {
public: public:
explicit StoreGlobalStub(bool is_constant, bool check_global) { explicit StoreGlobalStub(bool is_constant) {
bit_field_ = IsConstantBits::encode(is_constant) | bit_field_ = IsConstantBits::encode(is_constant);
CheckGlobalBits::encode(check_global);
}
static Handle<HeapObject> global_placeholder(Isolate* isolate) {
return isolate->factory()->uninitialized_value();
} }
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate, Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
GlobalObject* global, Map* receiver_map,
PropertyCell* cell) { PropertyCell* cell) {
Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate); Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
if (check_global()) { // Replace the placeholder cell and global object map with the actual global
// Replace the placeholder cell and global object map with the actual // cell and receiver map.
// global cell and receiver map.
code->ReplaceNthObject(1, global_placeholder(isolate)->map(), global);
code->ReplaceNthObject(1, isolate->heap()->meta_map(), global->map());
}
Map* cell_map = isolate->heap()->global_property_cell_map(); Map* cell_map = isolate->heap()->global_property_cell_map();
code->ReplaceNthObject(1, cell_map, cell); code->ReplaceNthObject(1, cell_map, cell);
code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
return code; return code;
} }
@ -986,12 +979,11 @@ class StoreGlobalStub : public HandlerStub {
Isolate* isolate, Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor); CodeStubInterfaceDescriptor* descriptor);
bool is_constant() const { virtual ExtraICState GetExtraICState() { return bit_field_; }
bool is_constant() {
return IsConstantBits::decode(bit_field_); return IsConstantBits::decode(bit_field_);
} }
bool check_global() const {
return CheckGlobalBits::decode(bit_field_);
}
void set_is_constant(bool value) { void set_is_constant(bool value) {
bit_field_ = IsConstantBits::update(bit_field_, value); bit_field_ = IsConstantBits::update(bit_field_, value);
} }
@ -1004,11 +996,13 @@ class StoreGlobalStub : public HandlerStub {
} }
private:
virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
class IsConstantBits: public BitField<bool, 0, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
class CheckGlobalBits: public BitField<bool, 9, 1> {};
int bit_field_;
DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
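The stubs in this file pack their parameters into an integer minor key through BitField helpers like the ones just above. A standalone sketch of that encode/decode/update pattern follows; it is an illustration only, not V8's actual BitField template, and the field names are made up.
// Minimal standalone sketch of the BitField<Type, shift, size> pattern used
// for stub minor keys. Illustration only; not V8's own implementation.
#include <cassert>
#include <cstdint>
template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t field, T value) {
    return (field & ~kMask) | encode(value);
  }
};
// Example: pack a boolean flag and a small register code into one key.
using IsConstantBitsSketch = BitFieldSketch<bool, 0, 1>;
using RegisterCodeBitsSketch = BitFieldSketch<int, 1, 5>;
int main() {
  uint32_t key = IsConstantBitsSketch::encode(true) |
                 RegisterCodeBitsSketch::encode(7);
  assert(IsConstantBitsSketch::decode(key) == true);
  assert(RegisterCodeBitsSketch::decode(key) == 7);
  key = IsConstantBitsSketch::update(key, false);
  assert(IsConstantBitsSketch::decode(key) == false);
  return 0;
}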
@ -1016,14 +1010,13 @@ class StoreGlobalStub : public HandlerStub {
class CallApiFunctionStub : public PlatformCodeStub { class CallApiFunctionStub : public PlatformCodeStub {
public: public:
CallApiFunctionStub(bool is_store, CallApiFunctionStub(bool restore_context,
bool call_data_undefined, bool call_data_undefined,
int argc) { int argc) {
bit_field_ = bit_field_ =
IsStoreBits::encode(is_store) | RestoreContextBits::encode(restore_context) |
CallDataUndefinedBits::encode(call_data_undefined) | CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc); ArgumentBits::encode(argc);
ASSERT(!is_store || argc == 1);
} }
private: private:
@ -1031,7 +1024,7 @@ class CallApiFunctionStub : public PlatformCodeStub {
virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; } virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
virtual int MinorKey() V8_OVERRIDE { return bit_field_; } virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
class IsStoreBits: public BitField<bool, 0, 1> {}; class RestoreContextBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {}; class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {}; class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
@ -1873,21 +1866,23 @@ class DoubleToIStub : public PlatformCodeStub {
int offset, int offset,
bool is_truncating, bool is_truncating,
bool skip_fastpath = false) : bit_field_(0) { bool skip_fastpath = false) : bit_field_(0) {
bit_field_ = SourceRegisterBits::encode(source.code()) | bit_field_ = SourceRegisterBits::encode(source.code_) |
DestinationRegisterBits::encode(destination.code()) | DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) | OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating) | IsTruncatingBits::encode(is_truncating) |
SkipFastPathBits::encode(skip_fastpath) | SkipFastPathBits::encode(skip_fastpath) |
SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ? SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ?
CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0); CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0);
} }
Register source() { Register source() {
return Register::from_code(SourceRegisterBits::decode(bit_field_)); Register result = { SourceRegisterBits::decode(bit_field_) };
return result;
} }
Register destination() { Register destination() {
return Register::from_code(DestinationRegisterBits::decode(bit_field_)); Register result = { DestinationRegisterBits::decode(bit_field_) };
return result;
} }
bool is_truncating() { bool is_truncating() {

2
deps/v8/src/codegen.cc

@ -165,8 +165,6 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
function->debug_name()->ToCString().get(), tracing_scope.file());
}
PrintF(tracing_scope.file(), "--- Optimized code ---\n");
PrintF(tracing_scope.file(),
"optimization_id = %d\n", info->optimization_id());
} else {
PrintF(tracing_scope.file(), "--- Code ---\n");
}

2
deps/v8/src/codegen.h

@ -72,8 +72,6 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#include "ia32/codegen-ia32.h" #include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64 #elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h" #include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_A64
#include "a64/codegen-a64.h"
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h" #include "arm/codegen-arm.h"
#elif V8_TARGET_ARCH_MIPS #elif V8_TARGET_ARCH_MIPS

30
deps/v8/src/compiler.cc

@ -60,8 +60,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script,
script_(script), script_(script),
osr_ast_id_(BailoutId::None()), osr_ast_id_(BailoutId::None()),
parameter_count_(0), parameter_count_(0),
this_has_uses_(true), this_has_uses_(true) {
optimization_id_(-1) {
Initialize(script->GetIsolate(), BASE, zone); Initialize(script->GetIsolate(), BASE, zone);
} }
@ -73,8 +72,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
script_(Handle<Script>(Script::cast(shared_info->script()))), script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()), osr_ast_id_(BailoutId::None()),
parameter_count_(0), parameter_count_(0),
this_has_uses_(true), this_has_uses_(true) {
optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone); Initialize(script_->GetIsolate(), BASE, zone);
} }
@ -88,8 +86,7 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
context_(closure->context()), context_(closure->context()),
osr_ast_id_(BailoutId::None()), osr_ast_id_(BailoutId::None()),
parameter_count_(0), parameter_count_(0),
this_has_uses_(true), this_has_uses_(true) {
optimization_id_(-1) {
Initialize(script_->GetIsolate(), BASE, zone); Initialize(script_->GetIsolate(), BASE, zone);
} }
@ -101,8 +98,7 @@ CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
IsLazy::encode(true)), IsLazy::encode(true)),
osr_ast_id_(BailoutId::None()), osr_ast_id_(BailoutId::None()),
parameter_count_(0), parameter_count_(0),
this_has_uses_(true), this_has_uses_(true) {
optimization_id_(-1) {
Initialize(isolate, STUB, zone); Initialize(isolate, STUB, zone);
code_stub_ = stub; code_stub_ = stub;
} }
@ -215,7 +211,8 @@ Code::Flags CompilationInfo::flags() const {
return Code::ComputeFlags(code_stub()->GetCodeKind(), return Code::ComputeFlags(code_stub()->GetCodeKind(),
code_stub()->GetICState(), code_stub()->GetICState(),
code_stub()->GetExtraICState(), code_stub()->GetExtraICState(),
code_stub()->GetStubType()); code_stub()->GetStubType(),
code_stub()->GetStubFlags());
} else { } else {
return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION); return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
} }
@ -246,13 +243,6 @@ bool CompilationInfo::ShouldSelfOptimize() {
} }
void CompilationInfo::PrepareForCompilation(Scope* scope) {
ASSERT(scope_ == NULL);
scope_ = scope;
function()->ProcessFeedbackSlots(isolate_);
}
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder { class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public: public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info) explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@ -373,7 +363,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Note that we use the same AST that we will use for generating the // Note that we use the same AST that we will use for generating the
// optimized code. // optimized code.
unoptimized.SetFunction(info()->function()); unoptimized.SetFunction(info()->function());
unoptimized.PrepareForCompilation(info()->scope()); unoptimized.SetScope(info()->scope());
unoptimized.SetContext(info()->context()); unoptimized.SetContext(info()->context());
if (should_recompile) unoptimized.EnableDeoptimizationSupport(); if (should_recompile) unoptimized.EnableDeoptimizationSupport();
bool succeeded = FullCodeGenerator::MakeCode(&unoptimized); bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
@ -408,7 +398,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
// Type-check the function. // Type-check the function.
AstTyper::Run(info()); AstTyper::Run(info());
graph_builder_ = FLAG_hydrogen_track_positions graph_builder_ = FLAG_emit_opt_code_positions
? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info()) ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
: new(info()->zone()) HOptimizedGraphBuilder(info()); : new(info()->zone()) HOptimizedGraphBuilder(info());
@ -992,7 +982,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Precondition: code has been parsed and scopes have been analyzed. // Precondition: code has been parsed and scopes have been analyzed.
CompilationInfoWithZone info(script); CompilationInfoWithZone info(script);
info.SetFunction(literal); info.SetFunction(literal);
info.PrepareForCompilation(literal->scope()); info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode()); info.SetLanguageMode(literal->scope()->language_mode());
Isolate* isolate = info.isolate(); Isolate* isolate = info.isolate();
@ -1188,7 +1178,7 @@ Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
if (FLAG_trace_opt) { if (FLAG_trace_opt) {
PrintF("[failed to optimize "); PrintF("[failed to optimize ");
function->PrintName(); function->PrintName();
PrintF(": %s]\n", GetBailoutReason(info->bailout_reason())); PrintF("]\n");
} }
if (isolate->has_pending_exception()) isolate->clear_pending_exception(); if (isolate->has_pending_exception()) isolate->clear_pending_exception();

11
deps/v8/src/compiler.h

@ -175,8 +175,10 @@ class CompilationInfo {
ASSERT(function_ == NULL); ASSERT(function_ == NULL);
function_ = literal; function_ = literal;
} }
// When the scope is applied, we may have deferred work to do on the function. void SetScope(Scope* scope) {
void PrepareForCompilation(Scope* scope); ASSERT(scope_ == NULL);
scope_ = scope;
}
void SetGlobalScope(Scope* global_scope) { void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL); ASSERT(global_scope_ == NULL);
global_scope_ = global_scope; global_scope_ = global_scope;
@ -227,7 +229,6 @@ class CompilationInfo {
SetMode(OPTIMIZE); SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id; osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized; unoptimized_code_ = unoptimized;
optimization_id_ = isolate()->NextOptimizationId();
} }
void DisableOptimization(); void DisableOptimization();
@ -316,8 +317,6 @@ class CompilationInfo {
return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_); return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
} }
int optimization_id() const { return optimization_id_; }
protected: protected:
CompilationInfo(Handle<Script> script, CompilationInfo(Handle<Script> script,
Zone* zone); Zone* zone);
@ -453,8 +452,6 @@ class CompilationInfo {
Handle<Foreign> object_wrapper_; Handle<Foreign> object_wrapper_;
int optimization_id_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo); DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
}; };

2
deps/v8/src/contexts.h

@ -167,7 +167,6 @@ enum BindingFlags {
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(RUN_MICROTASKS_INDEX, JSFunction, run_microtasks) \
V(ENQUEUE_EXTERNAL_MICROTASK_INDEX, JSFunction, enqueue_external_microtask) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@ -319,7 +318,6 @@ class Context: public FixedArray {
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
RUN_MICROTASKS_INDEX,
ENQUEUE_EXTERNAL_MICROTASK_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,

2
deps/v8/src/conversions-inl.h

@ -128,7 +128,7 @@ inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
if (!unicode_cache->IsWhiteSpaceOrLineTerminator(**current)) return true;
if (!unicode_cache->IsWhiteSpace(**current)) return true;
++*current;
}
return false;

2
deps/v8/src/dateparser.h

@ -122,7 +122,7 @@ class DateParser : public AllStatic {
}
bool SkipWhiteSpace() {
if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
if (unicode_cache_->IsWhiteSpace(ch_)) {
Next();
return true;
}

2
deps/v8/src/debug.cc

@ -792,7 +792,7 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
isolate, "error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<JSArray>());
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);

65
deps/v8/src/deoptimizer.cc

@ -731,12 +731,6 @@ void Deoptimizer::DoComputeOutputFrames() {
LOG(isolate(), CodeDeoptEvent(compiled_code_)); LOG(isolate(), CodeDeoptEvent(compiled_code_));
} }
ElapsedTimer timer; ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
if (trace_scope_ != NULL) { if (trace_scope_ != NULL) {
timer.Start(); timer.Start();
PrintF(trace_scope_->file(), PrintF(trace_scope_->file(),
@ -745,8 +739,7 @@ void Deoptimizer::DoComputeOutputFrames() {
reinterpret_cast<intptr_t>(function_)); reinterpret_cast<intptr_t>(function_));
PrintFunctionName(); PrintFunctionName();
PrintF(trace_scope_->file(), PrintF(trace_scope_->file(),
" (opt #%d) @%d, FP to SP delta: %d]\n", " @%d, FP to SP delta: %d]\n",
input_data->OptimizationId()->value(),
bailout_id_, bailout_id_,
fp_to_sp_delta_); fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT) { if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
@ -754,6 +747,10 @@ void Deoptimizer::DoComputeOutputFrames() {
} }
} }
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_); BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray(); ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index = unsigned translation_index =
@ -1753,7 +1750,11 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() {
Handle<JSObject> arguments = Handle<JSObject>::cast( Handle<JSObject> arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function)); Accessors::FunctionGetArguments(function));
materialized_objects_->Add(arguments); materialized_objects_->Add(arguments);
materialization_value_index_ += length; // To keep consistent object counters, we still materialize the
// nested values (but we throw them away).
for (int i = 0; i < length; ++i) {
MaterializeNextValue();
}
} else if (desc.is_arguments()) { } else if (desc.is_arguments()) {
// Construct an arguments object and copy the parameters to a newly // Construct an arguments object and copy the parameters to a newly
// allocated arguments object backing store. // allocated arguments object backing store.
@ -2691,9 +2692,6 @@ FrameDescription::FrameDescription(uint32_t frame_size,
constant_pool_(kZapUint32) { constant_pool_(kZapUint32) {
// Zap all the registers. // Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) { for (int r = 0; r < Register::kNumRegisters; r++) {
// TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
// isn't used before the next safepoint, the GC will try to scan it as a
// tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32); SetRegister(r, kZapUint32);
} }
@ -2998,8 +2996,7 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
} }
case Translation::ARGUMENTS_OBJECT: case Translation::ARGUMENTS_OBJECT:
// This can be only emitted for local slots not for argument slots. return SlotRef::NewArgumentsObject(iterator->Next());
break;
case Translation::CAPTURED_OBJECT: { case Translation::CAPTURED_OBJECT: {
return SlotRef::NewDeferredObject(iterator->Next()); return SlotRef::NewDeferredObject(iterator->Next());
@ -3049,7 +3046,7 @@ SlotRef SlotRefValueBuilder::ComputeSlotForNextArgument(
break; break;
} }
UNREACHABLE(); FATAL("We should never get here - unexpected deopt info.");
return SlotRef(); return SlotRef();
} }
@ -3129,9 +3126,8 @@ SlotRefValueBuilder::SlotRefValueBuilder(JavaScriptFrame* frame,
// the nested slots of captured objects // the nested slots of captured objects
number_of_slots--; number_of_slots--;
SlotRef& slot = slot_refs_.last(); SlotRef& slot = slot_refs_.last();
if (slot.Representation() == SlotRef::DEFERRED_OBJECT) { ASSERT(slot.Representation() != SlotRef::ARGUMENTS_OBJECT);
number_of_slots += slot.DeferredObjectLength(); number_of_slots += slot.GetChildrenCount();
}
if (slot.Representation() == SlotRef::DEFERRED_OBJECT || if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
slot.Representation() == SlotRef::DUPLICATE_OBJECT) { slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
should_deopt = true; should_deopt = true;
@ -3185,7 +3181,7 @@ Handle<Object> SlotRef::GetValue(Isolate* isolate) {
return literal_; return literal_;
default: default:
UNREACHABLE(); FATAL("We should never get here - unexpected deopt info.");
return Handle<Object>::null(); return Handle<Object>::null();
} }
} }
@ -3215,19 +3211,18 @@ Handle<Object> SlotRefValueBuilder::GetPreviouslyMaterialized(
previously_materialized_objects_->get(object_index), isolate); previously_materialized_objects_->get(object_index), isolate);
materialized_objects_.Add(return_value); materialized_objects_.Add(return_value);
// Now need to skip all nested objects (and possibly read them from // Now need to skip all the nested objects (and possibly read them from
// the materialization store, too) // the materialization store, too).
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
SlotRef& slot = slot_refs_[current_slot_]; SlotRef& slot = slot_refs_[current_slot_];
current_slot_++; current_slot_++;
// For nested deferred objects, we need to read its properties // We need to read all the nested objects - add them to the
if (slot.Representation() == SlotRef::DEFERRED_OBJECT) { // number of objects we need to process.
length += slot.DeferredObjectLength(); length += slot.GetChildrenCount();
}
// For nested deferred and duplicate objects, we need to put them into // Put the nested deferred/duplicate objects into our materialization
// our materialization array // array.
if (slot.Representation() == SlotRef::DEFERRED_OBJECT || if (slot.Representation() == SlotRef::DEFERRED_OBJECT ||
slot.Representation() == SlotRef::DUPLICATE_OBJECT) { slot.Representation() == SlotRef::DUPLICATE_OBJECT) {
int nested_object_index = materialized_objects_.length(); int nested_object_index = materialized_objects_.length();
@ -3253,8 +3248,20 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
case SlotRef::LITERAL: { case SlotRef::LITERAL: {
return slot.GetValue(isolate); return slot.GetValue(isolate);
} }
case SlotRef::ARGUMENTS_OBJECT: {
// We should never need to materialize an arguments object,
// but we still need to put something into the array
// so that the indexing is consistent.
materialized_objects_.Add(isolate->factory()->undefined_value());
int length = slot.GetChildrenCount();
for (int i = 0; i < length; ++i) {
// We don't need the argument, just ignore it
GetNext(isolate, lvl + 1);
}
return isolate->factory()->undefined_value();
}
case SlotRef::DEFERRED_OBJECT: { case SlotRef::DEFERRED_OBJECT: {
int length = slot.DeferredObjectLength(); int length = slot.GetChildrenCount();
ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL || ASSERT(slot_refs_[current_slot_].Representation() == SlotRef::LITERAL ||
slot_refs_[current_slot_].Representation() == SlotRef::TAGGED); slot_refs_[current_slot_].Representation() == SlotRef::TAGGED);
@ -3323,7 +3330,7 @@ Handle<Object> SlotRefValueBuilder::GetNext(Isolate* isolate, int lvl) {
break; break;
} }
UNREACHABLE(); FATAL("We should never get here - unexpected deopt slot kind.");
return Handle<Object>::null(); return Handle<Object>::null();
} }

20
deps/v8/src/deoptimizer.h

@ -794,7 +794,9 @@ class SlotRef BASE_EMBEDDED {
// with the DeferredObjectLength() method
// (the SlotRefs of the nested objects follow
// this SlotRef in the depth-first order.)
DUPLICATE_OBJECT // Duplicated object of a deferred object.
DUPLICATE_OBJECT, // Duplicated object of a deferred object.
ARGUMENTS_OBJECT // Arguments object - only used to keep indexing
// in sync, it should not be materialized.
};
SlotRef()
@ -806,6 +808,13 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Isolate* isolate, Object* literal)
: literal_(literal, isolate), representation_(LITERAL) { }
static SlotRef NewArgumentsObject(int length) {
SlotRef slot;
slot.representation_ = ARGUMENTS_OBJECT;
slot.deferred_object_length_ = length;
return slot;
}
static SlotRef NewDeferredObject(int length) {
SlotRef slot;
slot.representation_ = DEFERRED_OBJECT;
@ -822,7 +831,14 @@ class SlotRef BASE_EMBEDDED {
return slot;
}
int DeferredObjectLength() { return deferred_object_length_; }
int GetChildrenCount() {
if (representation_ == DEFERRED_OBJECT ||
representation_ == ARGUMENTS_OBJECT) {
return deferred_object_length_;
} else {
return 0;
}
}
int DuplicateObjectId() { return duplicate_object_id_; }
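The GetChildrenCount() convention above relies on a flat, depth-first slot layout: a deferred or arguments entry is immediately followed by its nested entries. A standalone sketch of that indexing scheme follows; it is an illustration only, not V8's SlotRef/SlotRefValueBuilder code, and the names below are made up.
// Each entry records how many direct children follow it in depth-first order.
#include <cstddef>
#include <vector>
enum class Kind { kLiteral, kDeferredObject, kArgumentsObject };
struct Entry {
  Kind kind;
  int child_count;  // 0 for literals, as with GetChildrenCount() above.
};
// Returns the index just past the entry at `index` and all of its nested
// entries, i.e. the position of its next sibling in the flat array.
size_t SkipEntry(const std::vector<Entry>& entries, size_t index) {
  size_t next = index + 1;
  for (int i = 0; i < entries[index].child_count; ++i) {
    next = SkipEntry(entries, next);
  }
  return next;
}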

28
deps/v8/src/execution.cc

@ -368,20 +368,6 @@ void Execution::RunMicrotasks(Isolate* isolate) {
} }
void Execution::EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask) {
bool threw = false;
Handle<Object> args[] = { microtask };
Execution::Call(
isolate,
isolate->enqueue_external_microtask(),
isolate->factory()->undefined_value(),
1,
args,
&threw);
ASSERT(!threw);
}
bool StackGuard::IsStackOverflow() { bool StackGuard::IsStackOverflow() {
ExecutionAccess access(isolate_); ExecutionAccess access(isolate_);
return (thread_local_.jslimit_ != kInterruptLimit && return (thread_local_.jslimit_ != kInterruptLimit &&
@ -516,15 +502,15 @@ void StackGuard::FullDeopt() {
} }
bool StackGuard::IsDeoptMarkedAllocationSites() { bool StackGuard::IsDeoptMarkedCode() {
ExecutionAccess access(isolate_); ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & DEOPT_MARKED_ALLOCATION_SITES) != 0; return (thread_local_.interrupt_flags_ & DEOPT_MARKED_CODE) != 0;
} }
void StackGuard::DeoptMarkedAllocationSites() { void StackGuard::DeoptMarkedCode() {
ExecutionAccess access(isolate_); ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= DEOPT_MARKED_ALLOCATION_SITES; thread_local_.interrupt_flags_ |= DEOPT_MARKED_CODE;
set_interrupt_limits(access); set_interrupt_limits(access);
} }
@ -1040,9 +1026,9 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(FULL_DEOPT); stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate); Deoptimizer::DeoptimizeAll(isolate);
} }
if (stack_guard->IsDeoptMarkedAllocationSites()) { if (stack_guard->IsDeoptMarkedCode()) {
stack_guard->Continue(DEOPT_MARKED_ALLOCATION_SITES); stack_guard->Continue(DEOPT_MARKED_CODE);
isolate->heap()->DeoptMarkedAllocationSites(); Deoptimizer::DeoptimizeMarkedCode(isolate);
} }
if (stack_guard->IsInstallCodeRequest()) { if (stack_guard->IsInstallCodeRequest()) {
ASSERT(isolate->concurrent_recompilation_enabled()); ASSERT(isolate->concurrent_recompilation_enabled());

9
deps/v8/src/execution.h

@ -45,7 +45,7 @@ enum InterruptFlag {
FULL_DEOPT = 1 << 6,
INSTALL_CODE = 1 << 7,
API_INTERRUPT = 1 << 8,
DEOPT_MARKED_ALLOCATION_SITES = 1 << 9
DEOPT_MARKED_CODE = 1 << 9
};
@ -175,7 +175,6 @@ class Execution : public AllStatic {
bool* has_pending_exception);
static void RunMicrotasks(Isolate* isolate);
static void EnqueueMicrotask(Isolate* isolate, Handle<Object> microtask);
};
@ -223,8 +222,8 @@ class StackGuard {
void RequestInstallCode();
bool IsFullDeopt();
void FullDeopt();
bool IsDeoptMarkedAllocationSites();
bool IsDeoptMarkedCode();
void DeoptMarkedAllocationSites();
void DeoptMarkedCode();
void Continue(InterruptFlag after_what);
void RequestInterrupt(InterruptCallback callback, void* data);
@ -282,7 +281,7 @@ class StackGuard {
void EnableInterrupts();
void DisableInterrupts();
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_A64
#if V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
#else

8
deps/v8/src/factory.cc

@ -1300,6 +1300,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->InternalizeString(*value), String);
}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
JSFunction::EnsureHasInitialMap(constructor);
@ -1566,6 +1572,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
int start_position,
int end_position,
Handle<Object> script,
Handle<Object> stack_trace,
Handle<Object> stack_frames) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSMessageObject(*type,
@ -1573,6 +1580,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
start_position,
end_position,
*script,
*stack_trace,
*stack_frames),
JSMessageObject);
}

4
deps/v8/src/factory.h

@ -225,6 +225,9 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
// Return the internalized version of the passed in string.
Handle<String> InternalizedStringFromString(Handle<String> value);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
@ -525,6 +528,7 @@ class Factory {
int start_position,
int end_position,
Handle<Object> script,
Handle<Object> stack_trace,
Handle<Object> stack_frames);
Handle<SeededNumberDictionary> DictionaryAtNumberPut(

110
deps/v8/src/feedback-slots.h

@ -1,110 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FEEDBACK_SLOTS_H_
#define V8_FEEDBACK_SLOTS_H_
#include "v8.h"
#include "isolate.h"
namespace v8 {
namespace internal {
enum ComputablePhase {
DURING_PARSE,
AFTER_SCOPING
};
class FeedbackSlotInterface {
public:
static const int kInvalidFeedbackSlot = -1;
virtual ~FeedbackSlotInterface() {}
// When can we ask how many feedback slots are necessary?
virtual ComputablePhase GetComputablePhase() = 0;
virtual int ComputeFeedbackSlotCount(Isolate* isolate) = 0;
virtual void SetFirstFeedbackSlot(int slot) = 0;
};
class DeferredFeedbackSlotProcessor {
public:
DeferredFeedbackSlotProcessor()
: slot_nodes_(NULL),
slot_count_(0) { }
void add_slot_node(Zone* zone, FeedbackSlotInterface* slot) {
if (slot->GetComputablePhase() == DURING_PARSE) {
// No need to add to the list
int count = slot->ComputeFeedbackSlotCount(zone->isolate());
slot->SetFirstFeedbackSlot(slot_count_);
slot_count_ += count;
} else {
if (slot_nodes_ == NULL) {
slot_nodes_ = new(zone) ZoneList<FeedbackSlotInterface*>(10, zone);
}
slot_nodes_->Add(slot, zone);
}
}
void ProcessFeedbackSlots(Isolate* isolate) {
// Scope analysis must have been done.
if (slot_nodes_ == NULL) {
return;
}
int current_slot = slot_count_;
for (int i = 0; i < slot_nodes_->length(); i++) {
FeedbackSlotInterface* slot_interface = slot_nodes_->at(i);
int count = slot_interface->ComputeFeedbackSlotCount(isolate);
if (count > 0) {
slot_interface->SetFirstFeedbackSlot(current_slot);
current_slot += count;
}
}
slot_count_ = current_slot;
slot_nodes_->Clear();
}
int slot_count() {
ASSERT(slot_count_ >= 0);
return slot_count_;
}
private:
ZoneList<FeedbackSlotInterface*>* slot_nodes_;
int slot_count_;
};
} } // namespace v8::internal
#endif // V8_FEEDBACK_SLOTS_H_

43
deps/v8/src/flag-definitions.h

@@ -234,6 +234,7 @@ DEFINE_implication(track_double_fields, track_fields)
 DEFINE_implication(track_heap_object_fields, track_fields)
 DEFINE_implication(track_computed_fields, track_fields)
 DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
+DEFINE_bool(smi_x64_store_opt, false, "optimized stores of smi on x64")
 // Flags for optimization types.
 DEFINE_bool(optimize_for_size, false,
@@ -254,9 +255,6 @@ DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
 DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
 DEFINE_bool(use_allocation_folding, true, "use allocation folding")
-DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
-DEFINE_bool(use_write_barrier_elimination, true,
-            "eliminate write barriers targeting allocations in optimized code")
 DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
 DEFINE_int(max_inlined_source_size, 600,
            "maximum source size in bytes considered for a single inlining")
@@ -418,6 +416,10 @@ DEFINE_bool(disable_native_files, false, "disable builtin natives files")
 // builtins-ia32.cc
 DEFINE_bool(inline_new, true, "use fast inline allocation")
+// checks.cc
+DEFINE_bool(stack_trace_on_abort, true,
+            "print a stack trace if an assertion failure occurs")
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(trace_codegen, false,
             "print name of functions for which code is generated")
@@ -533,7 +535,6 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
 DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
 DEFINE_int(sweeper_threads, 0,
            "number of parallel and concurrent sweeping threads")
-DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
 #ifdef VERIFY_HEAP
 DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
 #endif
@@ -581,35 +582,19 @@ DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
 DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
-// simulator-arm.cc, simulator-a64.cc and simulator-mips.cc
+// simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
 DEFINE_bool(check_icache, false,
             "Check icache flushes in ARM and MIPS simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#ifdef V8_TARGET_ARCH_A64
-DEFINE_int(sim_stack_alignment, 16,
-           "Stack alignment in bytes in simulator. This must be a power of two "
-           "and it must be at least 16. 16 is default.")
-#else
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
-#endif
-DEFINE_int(sim_stack_size, 2 * MB / KB,
-           "Stack size of the A64 simulator in kBytes (default is 2 MB)")
-DEFINE_bool(log_regs_modified, true,
-            "When logging register values, only print modified registers.")
-DEFINE_bool(log_colour, true,
-            "When logging, try to use coloured output.")
-DEFINE_bool(ignore_asm_unimplemented_break, false,
-            "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
-DEFINE_bool(trace_sim_messages, false,
-            "Trace simulator debug messages. Implied by --trace-sim.")
 // isolate.cc
-DEFINE_bool(stack_trace_on_illegal, false,
-            "print stack trace when an illegal exception is thrown")
 DEFINE_bool(abort_on_uncaught_exception, false,
             "abort program (dump core) when an uncaught exception is thrown")
-DEFINE_bool(trace_exception, false,
-            "print stack trace when throwing exceptions")
 DEFINE_bool(randomize_hashes, true,
             "randomize hashes to avoid predictable hash collisions "
             "(with snapshots this option cannot override the baked-in seed)")
@@ -814,11 +799,6 @@ DEFINE_bool(log_timer_events, false,
             "Time events including external callbacks.")
 DEFINE_implication(log_timer_events, log_internal_timer_events)
 DEFINE_implication(log_internal_timer_events, prof)
-DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
-DEFINE_string(log_instruction_file, "a64_inst.csv",
-              "AArch64 instruction statistics log file.")
-DEFINE_int(log_instruction_period, 1 << 22,
-           "AArch64 instruction statistics logging period.")
 DEFINE_bool(redirect_code_traces, false,
             "output deopt information and disassembly into file "
@@ -826,9 +806,6 @@ DEFINE_bool(redirect_code_traces, false,
 DEFINE_string(redirect_code_traces_to, NULL,
               "output deopt information and disassembly into the given file")
-DEFINE_bool(hydrogen_track_positions, false,
-            "track source code positions when building IR")
 //
 // Disassembler only flags
 //
@@ -861,6 +838,8 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
             "printing optimized code based on it")
 DEFINE_bool(print_code_verbose, false, "print more information for code")
 DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+DEFINE_bool(emit_opt_code_positions, false,
+            "annotate optimize code with source code positions")
 #ifdef ENABLE_DISASSEMBLER
 DEFINE_bool(sodium, false, "print generated code output suitable for use with "
@@ -869,7 +848,7 @@ DEFINE_bool(sodium, false, "print generated code output suitable for use with "
 DEFINE_implication(sodium, print_code_stubs)
 DEFINE_implication(sodium, print_code)
 DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, hydrogen_track_positions)
+DEFINE_implication(sodium, emit_opt_code_positions)
 DEFINE_implication(sodium, code_comments)
 DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
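The hunks above add and remove entries from a single macro list: flag-definitions.h is included several times with different expansions of DEFINE_bool / DEFINE_int / DEFINE_implication, so one line both defines a flag variable and feeds implication processing. The sketch below shows that list-macro technique in miniature; FLAG_LIST, ApplyImplications and the flag descriptions are invented for the example, so treat it as a sketch of the pattern rather than V8's actual macro machinery.

// Hypothetical miniature of the flag-definition macro pattern.
#include <cstdio>

#define FLAG_LIST(DEFINE_BOOL, DEFINE_IMPLICATION)                    \
  DEFINE_BOOL(sodium, false, "print generated code output")           \
  DEFINE_BOOL(emit_opt_code_positions, false,                         \
              "annotate optimized code with source positions")        \
  DEFINE_IMPLICATION(sodium, emit_opt_code_positions)

// Expansion 1: define one global variable per flag.
#define DEFINE_BOOL_VAR(name, deflt, comment) bool FLAG_##name = deflt;
#define DEFINE_IMPLICATION_NOP(from, to)
FLAG_LIST(DEFINE_BOOL_VAR, DEFINE_IMPLICATION_NOP)

// Expansion 2: turn each implication into "if (from) to = true;".
#define DEFINE_BOOL_NOP(name, deflt, comment)
#define DEFINE_IMPLICATION_APPLY(from, to) if (FLAG_##from) FLAG_##to = true;
void ApplyImplications() { FLAG_LIST(DEFINE_BOOL_NOP, DEFINE_IMPLICATION_APPLY) }

int main() {
  FLAG_sodium = true;   // as if --sodium had been passed on the command line
  ApplyImplications();  // sodium implies emit_opt_code_positions
  std::printf("emit_opt_code_positions = %d\n", FLAG_emit_opt_code_positions);
  return 0;
}

With this layout, adding or removing a single DEFINE_* line, as the hunks above do for smi_x64_store_opt, stack_trace_on_abort and the A64 simulator flags, updates both the flag variable and any implication handling in one place.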

2
deps/v8/src/frames-inl.h

@@ -36,8 +36,6 @@
 #include "ia32/frames-ia32.h"
 #elif V8_TARGET_ARCH_X64
 #include "x64/frames-x64.h"
-#elif V8_TARGET_ARCH_A64
-#include "a64/frames-a64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/frames-arm.h"
 #elif V8_TARGET_ARCH_MIPS

4
deps/v8/src/frames.h

@@ -35,11 +35,7 @@
 namespace v8 {
 namespace internal {
 
-#if V8_TARGET_ARCH_A64
-typedef uint64_t RegList;
-#else
 typedef uint32_t RegList;
-#endif
 
 // Get the number of registers in a given register list.
 int NumRegs(RegList list);

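The frames.h hunk drops the A64-only 64-bit register list, leaving RegList as a uint32_t bitmask with one bit per register, and NumRegs() is then essentially a population count over that mask. The stand-alone sketch below illustrates the idea; CountRegs and the register constants are hypothetical stand-ins for the NumRegs() declared above, not code from this diff.

// Hypothetical illustration of a RegList-style register bitmask.
#include <cassert>
#include <cstdint>

typedef uint32_t RegList;  // one bit per register, as in the 32-bit ports

inline int CountRegs(RegList list) {
  int count = 0;
  while (list != 0) {
    list &= list - 1;  // clear the lowest set bit
    ++count;
  }
  return count;
}

int main() {
  const RegList r0 = 1u << 0, r3 = 1u << 3, r7 = 1u << 7;
  assert(CountRegs(r0 | r3 | r7) == 3);
  return 0;
}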
Some files were not shown because too many files changed in this diff
