
v8: upgrade to v8 3.20.7

v0.11.5-release
Trevor Norris, 12 years ago
commit 5777d7ab30
  1. 58   deps/v8/ChangeLog
  2. 2    deps/v8/DEPS
  3. 15   deps/v8/Makefile
  4. 10   deps/v8/Makefile.android
  5. 4    deps/v8/Makefile.nacl
  6. 3    deps/v8/PRESUBMIT.py
  7. 111  deps/v8/build/features.gypi
  8. 4    deps/v8/build/gyp_v8
  9. 15   deps/v8/build/standalone.gypi
  10. 123  deps/v8/build/toolchain.gypi
  11. 10   deps/v8/include/v8-debug.h
  12. 15   deps/v8/include/v8-profiler.h
  13. 20   deps/v8/include/v8.h
  14. 2    deps/v8/preparser/preparser.gyp
  15. 3    deps/v8/samples/lineprocessor.cc
  16. 4    deps/v8/samples/process.cc
  17. 16   deps/v8/samples/samples.gyp
  18. 1    deps/v8/samples/shell.cc
  19. 40   deps/v8/src/accessors.cc
  20. 6    deps/v8/src/accessors.h
  21. 56   deps/v8/src/api.cc
  22. 2    deps/v8/src/apinatives.js
  23. 251  deps/v8/src/arm/assembler-arm.cc
  24. 113  deps/v8/src/arm/assembler-arm.h
  25. 1    deps/v8/src/arm/builtins-arm.cc
  26. 506  deps/v8/src/arm/code-stubs-arm.cc
  27. 81   deps/v8/src/arm/code-stubs-arm.h
  28. 261  deps/v8/src/arm/codegen-arm.cc
  29. 43   deps/v8/src/arm/constants-arm.h
  30. 31   deps/v8/src/arm/deoptimizer-arm.cc
  31. 230  deps/v8/src/arm/disasm-arm.cc
  32. 16   deps/v8/src/arm/full-codegen-arm.cc
  33. 21   deps/v8/src/arm/ic-arm.cc
  34. 72   deps/v8/src/arm/lithium-arm.cc
  35. 89   deps/v8/src/arm/lithium-arm.h
  36. 192  deps/v8/src/arm/lithium-codegen-arm.cc
  37. 1    deps/v8/src/arm/lithium-codegen-arm.h
  38. 5    deps/v8/src/arm/lithium-gap-resolver-arm.cc
  39. 28   deps/v8/src/arm/macro-assembler-arm.cc
  40. 6    deps/v8/src/arm/macro-assembler-arm.h
  41. 347  deps/v8/src/arm/simulator-arm.cc
  42. 17   deps/v8/src/arm/simulator-arm.h
  43. 165  deps/v8/src/arm/stub-cache-arm.cc
  44. 127  deps/v8/src/array-iterator.js
  45. 17   deps/v8/src/assembler.cc
  46. 5    deps/v8/src/assembler.h
  47. 34   deps/v8/src/ast.cc
  48. 22   deps/v8/src/ast.h
  49. 9    deps/v8/src/atomicops.h
  50. 1    deps/v8/src/atomicops_internals_x86_gcc.cc
  51. 1    deps/v8/src/bignum.cc
  52. 30   deps/v8/src/bootstrapper.cc
  53. 18   deps/v8/src/builtins.cc
  54. 12   deps/v8/src/char-predicates-inl.h
  55. 2    deps/v8/src/char-predicates.h
  56. 6    deps/v8/src/checks.h
  57. 13   deps/v8/src/circular-queue-inl.h
  58. 37   deps/v8/src/circular-queue.cc
  59. 25   deps/v8/src/circular-queue.h
  60. 238  deps/v8/src/code-stubs-hydrogen.cc
  61. 200  deps/v8/src/code-stubs.cc
  62. 377  deps/v8/src/code-stubs.h
  63. 4    deps/v8/src/codegen.h
  64. 92   deps/v8/src/collection.js
  65. 1    deps/v8/src/compilation-cache.cc
  66. 9    deps/v8/src/compiler.cc
  67. 2    deps/v8/src/compiler.h
  68. 6    deps/v8/src/contexts.cc
  69. 28   deps/v8/src/conversions-inl.h
  70. 11   deps/v8/src/conversions.h
  71. 2    deps/v8/src/counters.cc
  72. 2    deps/v8/src/cpu-profiler-inl.h
  73. 84   deps/v8/src/cpu-profiler.cc
  74. 32   deps/v8/src/cpu-profiler.h
  75. 8    deps/v8/src/d8-debug.cc
  76. 5    deps/v8/src/d8-debug.h
  77. 4    deps/v8/src/d8.cc
  78. 14   deps/v8/src/d8.gyp
  79. 1    deps/v8/src/dateparser.cc
  80. 21   deps/v8/src/debug.cc
  81. 39   deps/v8/src/deoptimizer.cc
  82. 2    deps/v8/src/disassembler.cc
  83. 1    deps/v8/src/elements-kind.cc
  84. 1    deps/v8/src/execution.cc
  85. 2    deps/v8/src/extensions/i18n/break-iterator.cc
  86. 3    deps/v8/src/extensions/i18n/collator.cc
  87. 1    deps/v8/src/extensions/i18n/i18n-extension.cc
  88. 7    deps/v8/src/extensions/i18n/i18n-utils.cc
  89. 3    deps/v8/src/extensions/i18n/locale.cc
  90. 4    deps/v8/src/extensions/i18n/number-format.cc
  91. 13   deps/v8/src/factory.cc
  92. 78   deps/v8/src/factory.h
  93. 18   deps/v8/src/flag-definitions.h
  94. 6    deps/v8/src/frames-inl.h
  95. 44   deps/v8/src/frames.cc
  96. 2    deps/v8/src/frames.h
  97. 7    deps/v8/src/full-codegen.cc
  98. 4    deps/v8/src/full-codegen.h
  99. 1    deps/v8/src/gdb-jit.cc
  100. 11   deps/v8/src/global-handles.cc

58  deps/v8/ChangeLog

@ -1,3 +1,61 @@
2013-07-22: Version 3.20.7

        Deprecated some debugger methods.

        Fixed wrong bailout id in polymorphic stores (Chromium issue 259787).

        Fixed data race in SamplingCircularQueue (Chromium issue 251218).

        Fixed type feedback in presence of negative lookups
        (Chromium issue 252797).

        Do not materialize context-allocated values for debug-evaluate
        (Chromium issue 259300).

        Synchronized Compare-Literal behavior in FullCodegen and Hydrogen
        (Chromium issue 260345).

        Performance and stability improvements on all platforms.


2013-07-17: Version 3.20.6

        Try to remove invalidated stubs before falling back to checking the
        constant state (Chromium issue 260585).

        Fixed gyp_v8 to work with use_system_icu=1 (issue 2475).

        Fixed sloppy-mode 'const' under Harmony flag (Chromium issue 173361).

        Use internal array as API function cache (Chromium issue 260106).

        Fixed possible stack overflow in range analysis
        (Chromium issue 259452).

        Performance and stability improvements on all platforms.


2013-07-15: Version 3.20.5

        Ensured that the length of frozen arrays is immutable
        (issue 2711, Chromium issue 259548).

        Performance and stability improvements on all platforms.


2013-07-10: Version 3.20.4

        Fixed garbage-collection issue that causes a crash on ARM
        (Chromium issue 254570).

        Performance and stability improvements on all platforms.


2013-07-08: Version 3.20.3

        Performance and stability improvements on all platforms.


2013-07-05: Version 3.20.2

        Remove deprecated heap profiler methods from V8 public API

2  deps/v8/DEPS

@ -8,7 +8,7 @@ deps = {
"http://gyp.googlecode.com/svn/trunk@1656",
"v8/third_party/icu":
"https://src.chromium.org/chrome/trunk/deps/third_party/icu46@205936",
"https://src.chromium.org/chrome/trunk/deps/third_party/icu46@210659",
}
deps_os = {

15  deps/v8/Makefile

@ -34,6 +34,7 @@ TESTJOBS ?=
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
ANDROID_NDK_HOST_ARCH ?=
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/tmp/v8
NACL_SDK_ROOT ?=
@ -91,6 +92,10 @@ endif
ifeq ($(vtunejit), on)
GYPFLAGS += -Dv8_enable_vtunejit=1
endif
# optdebug=on
ifeq ($(optdebug), on)
GYPFLAGS += -Dv8_optimized_debug=1
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
GYPFLAGS += -Dv8_enable_debugger_support=0
@ -210,9 +215,9 @@ ANDROID_ARCHES = android_ia32 android_arm android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
preparser/preparser.gyp samples/samples.gyp src/d8.gyp \
test/cctest/cctest.gyp tools/gyp/v8.gyp
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
build/toolchain.gypi preparser/preparser.gyp samples/samples.gyp \
src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
@ -352,6 +357,7 @@ clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.cl
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
@ -359,6 +365,7 @@ $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
GYP_GENERATORS=make \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
@ -401,4 +408,4 @@ dependencies:
--revision 1656
svn checkout --force \
https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
third_party/icu --revision 205936
third_party/icu --revision 210659

10  deps/v8/Makefile.android

@ -35,11 +35,12 @@ ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux-x86
TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
else
ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = darwin-x86
TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
else
$(error Host platform "${HOST_OS}" is not supported)
endif
@ -67,7 +68,9 @@ endif
TOOLCHAIN_PATH = ${ANDROID_NDK_ROOT}/toolchains/${TOOLCHAIN_ARCH}/prebuilt
ANDROID_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(wildcard $(ANDROID_TOOLCHAIN)),)
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}")
$(error Cannot find Android toolchain in "${ANDROID_TOOLCHAIN}". Please \
check that ANDROID_NDK_ROOT and ANDROID_NDK_HOST_ARCH are set \
correctly)
endif
# For mksnapshot host generation.
@ -93,6 +96,7 @@ $(ANDROID_MAKEFILES):
GYP_DEFINES="${DEFINES}" \
CC="${ANDROID_TOOLCHAIN}/bin/*-gcc" \
CXX="${ANDROID_TOOLCHAIN}/bin/*-g++" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.${ARCH} ${GYPFLAGS}

4  deps/v8/Makefile.nacl

@ -91,6 +91,8 @@ $(NACL_MAKEFILES):
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS) \
-Dwno_array_bounds=-Wno-array-bounds

3  deps/v8/PRESUBMIT.py

@ -44,7 +44,8 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"Copyright header and trailing whitespaces check failed"))
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
return results

111  deps/v8/build/features.gypi

@ -0,0 +1,111 @@
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Compile time controlled V8 features.
{
'variables': {
'v8_compress_startup_data%': 'off',
'v8_enable_debugger_support%': 1,
'v8_enable_disassembler%': 0,
'v8_enable_gdbjit%': 0,
'v8_object_print%': 0,
'v8_enable_verify_heap%': 0,
'v8_use_snapshot%': 'true',
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
'v8_postmortem_support%': 'false',
# Interpreted regexp engine exists as platform-independent alternative
# based where the regular expression is compiled to a bytecode.
'v8_interpreted_regexp%': 0,
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
'v8_enable_i18n_support%': 0,
},
'target_defaults': {
'conditions': [
['v8_enable_debugger_support==1', {
'defines': ['ENABLE_DEBUGGER_SUPPORT',],
}],
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_verify_heap==1', {
'defines': ['VERIFY_HEAP',],
}],
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
],
}],
], # conditions
'configurations': {
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
], # conditions
}, # Release
}, # configurations
}, # target_defaults
}

4  deps/v8/build/gyp_v8

@ -47,6 +47,10 @@ if __name__ == '__main__':
sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
import gyp
# Add paths so that pymod_do_main(...) can import files.
sys.path.insert(
1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
def apply_gyp_environment(file_path=None):
"""

15  deps/v8/build/standalone.gypi

@ -28,11 +28,15 @@
# Definitions to be used when building stand-alone V8 binaries.
{
# We need to include toolchain.gypi here for third-party sources that don't
# directly include it themselves.
'includes': ['toolchain.gypi'],
'variables': {
'component%': 'static_library',
'clang%': 0,
'visibility%': 'hidden',
'v8_enable_backtrace%': 0,
'v8_enable_i18n_support%': 0,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@ -128,6 +132,9 @@
},
}],
['OS == "win"', {
'defines!': [
'DEBUG',
],
'msvs_settings': {
'VCCLCompilerTool': {
'WarnAsError': 'false',
@ -218,6 +225,14 @@
# 1 == /SUBSYSTEM:CONSOLE
# 2 == /SUBSYSTEM:WINDOWS
'SubSystem': '1',
'conditions': [
['v8_enable_i18n_support==1', {
'AdditionalDependencies': [
'advapi32.lib',
],
}],
],
},
},
},

123  deps/v8/build/common.gypi → deps/v8/build/toolchain.gypi

@ -1,4 +1,4 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -32,7 +32,6 @@
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
'v8_compress_startup_data%': 'off',
'v8_target_arch%': '<(target_arch)',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
@ -42,14 +41,6 @@
# NaCl V8 builds stop using the ARM simulator
'nacl_target_arch%': 'none', # must be set externally
# Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
# generated by V8 to do unaligned memory access, and setting it to 'false'
# will ensure that the generated code will always do aligned memory
# accesses. The default value of 'default' will try to determine the correct
# setting. Note that for Intel architectures (ia32 and x64) unaligned memory
# access is allowed for all CPUs.
'v8_can_use_unaligned_accesses%': 'default',
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
# ARM target. Leaving the default value of 'false' will avoid the use of
@ -67,21 +58,14 @@
# Default arch variant for MIPS.
'mips_arch_variant%': 'mips32r2',
'v8_enable_debugger_support%': 1,
'v8_enable_backtrace%': 0,
'v8_enable_disassembler%': 0,
'v8_enable_gdbjit%': 0,
'v8_object_print%': 0,
# Turns on compiler optimizations in Debug builds (#defines are unaffected).
'v8_optimized_debug%': 0,
# Enable profiling support. Only required on Windows.
'v8_enable_prof%': 0,
'v8_enable_verify_heap%': 0,
# Some versions of GCC 4.5 seem to need -fno-strict-aliasing.
'v8_no_strict_aliasing%': 0,
@ -89,49 +73,16 @@
# it's handled in build/standalone.gypi.
'want_separate_host_toolset%': 1,
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
'werror%': '-Werror',
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
'v8_postmortem_support%': 'false',
# For a shared library build, results in "libv8-<(soname_version).so".
'soname_version%': '',
# Interpreted regexp engine exists as platform-independent alternative
# based where the regular expression is compiled to a bytecode.
'v8_interpreted_regexp%': 0,
# Enable ECMAScript Internationalization API. Enabling this feature will
# add a dependency on the ICU library.
'v8_enable_i18n_support%': 0,
# Allow to suppress the array bounds warning (default is no suppression).
'wno_array_bounds%': '',
},
'target_defaults': {
'conditions': [
['v8_enable_debugger_support==1', {
'defines': ['ENABLE_DEBUGGER_SUPPORT',],
}],
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],
['v8_enable_verify_heap==1', {
'defines': ['VERIFY_HEAP',],
}],
['v8_interpreted_regexp==1', {
'defines': ['V8_INTERPRETED_REGEXP',],
}],
['v8_enable_i18n_support==1', {
'defines': ['V8_I18N_SUPPORT',],
}],
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@ -391,11 +342,6 @@
},
'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
],
}],
['OS=="win"', {
'defines': [
'WIN32',
@ -488,9 +434,6 @@
], # conditions
'configurations': {
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
},
'defines': [
'DEBUG',
'ENABLE_DISASSEMBLER',
@ -500,14 +443,22 @@
],
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': '0',
'conditions': [
['OS=="win" and component=="shared_library"', {
['component=="shared_library"', {
'RuntimeLibrary': '3', # /MDd
}, {
'RuntimeLibrary': '1', # /MTd
}],
['v8_optimized_debug==1', {
'Optimization': '1',
'InlineFunctionExpansion': '2',
'EnableIntrinsicFunctions': 'true',
'FavorSizeOrSpeed': '0',
'StringPooling': 'true',
'BasicRuntimeChecks': '0',
}, {
'Optimization': '0',
}],
],
},
'VCLinkerTool': {
@ -515,12 +466,30 @@
},
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
'-Wnon-virtual-dtor', '-Woverloaded-virtual',
'<(wno_array_bounds)' ],
'conditions': [
['v8_optimized_debug==1', {
'cflags!': [
'-O0',
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O1',
],
}],
['v8_optimized_debug==1 and gcc_version==44 and clang==0', {
'cflags': [
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',
],
}],
],
}],
['OS=="linux" and v8_enable_backtrace==1', {
# Support for backtrace_symbols.
@ -542,19 +511,20 @@
}],
['OS=="mac"', {
'xcode_settings': {
'conditions': [
['v8_optimized_debug==1', {
'GCC_OPTIMIZATION_LEVEL': '1', # -O1
'GCC_STRICT_ALIASING': 'YES',
}, {
'GCC_OPTIMIZATION_LEVEL': '0', # -O0
}],
],
},
}],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags!': [
'-O2',
@ -564,6 +534,7 @@
'-fdata-sections',
'-ffunction-sections',
'-O3',
'<(wno_array_bounds)',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
@ -613,7 +584,7 @@
'FavorSizeOrSpeed': '0',
'StringPooling': 'true',
'conditions': [
['OS=="win" and component=="shared_library"', {
['component=="shared_library"', {
'RuntimeLibrary': '2', #/MD
}, {
'RuntimeLibrary': '0', #/MT

10  deps/v8/include/v8-debug.h

@ -245,8 +245,9 @@ class EXPORT Debug {
typedef void (*DebugMessageDispatchHandler)();
// Set a C debug event listener.
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
V8_DEPRECATED(static bool SetDebugEventListener(
EventCallback that,
Handle<Value> data = Handle<Value>()));
static bool SetDebugEventListener2(EventCallback2 that,
Handle<Value> data = Handle<Value>());
@ -274,8 +275,9 @@ class EXPORT Debug {
// Message based interface. The message protocol is JSON. NOTE the message
// handler thread is not supported any more parameter must be false.
static void SetMessageHandler(MessageHandler handler,
bool message_handler_thread = false);
V8_DEPRECATED(static void SetMessageHandler(
MessageHandler handler,
bool message_handler_thread = false));
static void SetMessageHandler2(MessageHandler2 handler);
// If no isolate is provided the default isolate is
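
For embedders, the two deprecations above mean moving to the existing *2 variants. A minimal migration sketch, not part of this commit, assuming the EventCallback2 and MessageHandler2 typedefs that v8-debug.h already declares (taking Debug::EventDetails and Debug::Message references):

#include <v8.h>
#include <v8-debug.h>

static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  // Handle break/exception events here instead of through the old
  // EventCallback-based SetDebugEventListener.
}

static void OnDebugMessage(const v8::Debug::Message& message) {
  // JSON protocol messages arrive here; the old message_handler_thread
  // parameter disappears because that mode is no longer supported.
}

void InstallDebugHooks() {
  v8::Debug::SetDebugEventListener2(OnDebugEvent);  // replaces SetDebugEventListener
  v8::Debug::SetMessageHandler2(OnDebugMessage);    // replaces SetMessageHandler
}
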

15  deps/v8/include/v8-profiler.h

@ -181,18 +181,9 @@ class V8EXPORT CpuProfiler {
*/
int GetProfileCount();
/** Deprecated. Use GetCpuProfile with single parameter. */
V8_DEPRECATED(const CpuProfile* GetCpuProfile(
int index,
Handle<Value> security_token));
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(int index);
/** Returns a profile by uid. */
V8_DEPRECATED(const CpuProfile* FindCpuProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>()));
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
@ -206,12 +197,6 @@ class V8EXPORT CpuProfiler {
*/
void StartCpuProfiling(Handle<String> title, bool record_samples = false);
/**
* Deprecated. Use StopCpuProfiling with one parameter instead.
*/
V8_DEPRECATED(const CpuProfile* StopCpuProfiling(
Handle<String> title,
Handle<Value> security_token));
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
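
The security-token overloads above are removed in favour of the single-argument forms. A rough usage sketch of the surviving API, not taken from this commit, assuming Isolate::GetCpuProfiler() as exposed by v8.h of this vintage:

#include <v8.h>
#include <v8-profiler.h>

void ProfileOnce(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
  v8::Local<v8::String> title = v8::String::New("startup");
  profiler->StartCpuProfiling(title, true /* record_samples */);
  // ... run the code being measured ...
  const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
  // Profiles stay owned by the profiler and can be re-read by index,
  // with no security token involved anymore.
  const v8::CpuProfile* same =
      profiler->GetCpuProfile(profiler->GetProfileCount() - 1);
  (void)profile;
  (void)same;
}
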

20  deps/v8/include/v8.h

@ -2377,6 +2377,7 @@ class V8EXPORT Function : public Object {
};
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
#endif
@ -2489,6 +2490,12 @@ class V8EXPORT ArrayBuffer : public Object {
};
#ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
#endif
/**
* A base class for an instance of one of "views" over ArrayBuffer,
* including TypedArrays and DataView (ES6 draft 15.13).
@ -2516,6 +2523,9 @@ class V8EXPORT ArrayBufferView : public Object {
V8_INLINE(static ArrayBufferView* Cast(Value* obj));
static const int kInternalFieldCount =
V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
private:
ArrayBufferView();
static void CheckCast(Value* obj);
@ -4689,6 +4699,12 @@ class V8EXPORT V8 {
*/
static int ContextDisposedNotification();
/**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. Returns true on success.
*/
static bool InitializeICU();
private:
V8();
@ -5383,7 +5399,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptyStringRootIndex = 131;
static const int kEmptyStringRootIndex = 132;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@ -5393,7 +5409,7 @@ class Internals {
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
static const int kJSObjectType = 0xb0;
static const int kJSObjectType = 0xb1;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x88;
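
The InitializeICU() addition changes the expected embedder start-up sequence; the samples touched by this commit all gain the same call. A compact sketch of that sequence (a simplified composite of the sample changes, not verbatim from any one of them):

#include <v8.h>

int main(int argc, char* argv[]) {
  v8::V8::InitializeICU();  // only needed when using the ICU bundled with V8
  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);
  // ... compile and run scripts here ...
  v8::V8::Dispose();
  return 0;
}
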

2  deps/v8/preparser/preparser.gyp

@ -29,7 +29,7 @@
'variables': {
'v8_code': 1,
},
'includes': ['../build/common.gypi'],
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
'target_name': 'preparser',

3  deps/v8/samples/lineprocessor.cc

@ -322,7 +322,9 @@ bool RunCppCycle(v8::Handle<v8::Script> script,
return true;
}
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
int result = RunMain(argc, argv);
v8::V8::Dispose();
return result;
@ -421,6 +423,7 @@ void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(ReadLine());
}
v8::Handle<v8::String> ReadLine() {
const int kBufferSize = 1024 + 1;
char buffer[kBufferSize];

4  deps/v8/samples/process.cc

@ -54,6 +54,7 @@ class HttpRequest {
virtual const string& UserAgent() = 0;
};
/**
* The abstract superclass of http request processors.
*/
@ -72,6 +73,7 @@ class HttpRequestProcessor {
static void Log(const char* event);
};
/**
* An http request processor that is scriptable using JavaScript.
*/
@ -135,6 +137,7 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static Persistent<ObjectTemplate> map_template_;
};
// -------------------------
// --- P r o c e s s o r ---
// -------------------------
@ -624,6 +627,7 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
map<string, string> options;
string file;
ParseOptions(argc, argv, options, &file);

16  deps/v8/samples/samples.gyp

@ -28,8 +28,9 @@
{
'variables': {
'v8_code': 1,
'v8_enable_i18n_support%': 0,
},
'includes': ['../build/common.gypi'],
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'target_defaults': {
'type': 'executable',
'dependencies': [
@ -38,6 +39,19 @@
'include_dirs': [
'../include',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icudata',
],
}],
],
},
'targets': [
{

1  deps/v8/samples/shell.cc

@ -66,6 +66,7 @@ static bool run_shell;
int main(int argc, char* argv[]) {
v8::V8::InitializeICU();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
run_shell = (argc == 1);

40  deps/v8/src/accessors.cc

@ -450,26 +450,23 @@ Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Isolate* isolate = Isolate::Current();
JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return isolate->heap()->undefined_value();
while (!function->should_have_prototype()) {
function = FindInstanceOf<JSFunction>(isolate, function->GetPrototype());
JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
if (function_raw == NULL) return isolate->heap()->undefined_value();
while (!function_raw->should_have_prototype()) {
function_raw = FindInstanceOf<JSFunction>(isolate,
function_raw->GetPrototype());
// There has to be one because we hit the getter.
ASSERT(function != NULL);
ASSERT(function_raw != NULL);
}
if (!function->has_prototype()) {
Object* prototype;
{ MaybeObject* maybe_prototype
= isolate->heap()->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
{ MaybeObject* maybe_result = function->SetPrototype(prototype);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
if (!function_raw->has_prototype()) {
HandleScope scope(isolate);
Handle<JSFunction> function(function_raw);
Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
JSFunction::SetPrototype(function, proto);
function_raw = *function;
}
return function->prototype();
return function_raw->prototype();
}
@ -503,9 +500,7 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
old_value = isolate->factory()->NewFunctionPrototype(function);
}
Handle<Object> result;
MaybeObject* maybe_result = function->SetPrototype(*value);
if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
JSFunction::SetPrototype(function, value);
ASSERT(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
@ -581,6 +576,13 @@ const AccessorDescriptor Accessors::FunctionName = {
//
Handle<Object> Accessors::FunctionGetArguments(Handle<Object> object) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate, Accessors::FunctionGetArguments(*object, 0), Object);
}
static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,

6  deps/v8/src/accessors.h

@ -77,14 +77,12 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
void*);
static Handle<Object> FunctionGetPrototype(Handle<Object> object);
static Handle<Object> FunctionGetArguments(Handle<Object> object);
MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
Object* value,
void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
@ -92,8 +90,10 @@ class Accessors : public AllStatic {
private:
// Accessor functions only used through the descriptor.
static MaybeObject* FunctionGetPrototype(Object* object, void*);
static MaybeObject* FunctionGetLength(Object* object, void*);
static MaybeObject* FunctionGetName(Object* object, void*);
static MaybeObject* FunctionGetArguments(Object* object, void*);
static MaybeObject* FunctionGetCaller(Object* object, void*);
MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
Object* value, void*);

56  deps/v8/src/api.cc

@ -45,6 +45,7 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "heap-snapshot-generator-inl.h"
#include "icu_util.h"
#include "messages.h"
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include "natives.h"
@ -298,6 +299,7 @@ static inline bool EmptyCheck(const char* location, const v8::Data* obj) {
return (obj == 0) ? ReportEmptyHandle(location) : false;
}
// --- S t a t i c s ---
@ -322,6 +324,7 @@ static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
return ApiCheck(InitializeHelper(isolate), location, "Error initializing V8");
}
// Some initializing API functions are called early and may be
// called on a thread different from static initializer thread.
// If Isolate API is used, Isolate::Enter() will initialize TLS so
@ -401,6 +404,7 @@ enum CompressedStartupDataItems {
kCompressedStartupDataCount
};
int V8::GetCompressedStartupDataCount() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
return kCompressedStartupDataCount;
@ -670,6 +674,7 @@ void V8::DisposeGlobal(i::Object** obj) {
i::GlobalHandles::Destroy(obj);
}
// --- H a n d l e s ---
@ -4422,6 +4427,7 @@ bool String::IsOneByte() const {
return str->HasOnlyOneByteChars();
}
// Helpers for ContainsOnlyOneByteHelper
template<size_t size> struct OneByteMask;
template<> struct OneByteMask<4> {
@ -4435,6 +4441,8 @@ static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1;
static inline bool Unaligned(const uint16_t* chars) {
return reinterpret_cast<const uintptr_t>(chars) & kAlignmentMask;
}
static inline const uint16_t* Align(const uint16_t* chars) {
return reinterpret_cast<uint16_t*>(
reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
@ -5419,6 +5427,11 @@ int v8::V8::ContextDisposedNotification() {
}
bool v8::V8::InitializeICU() {
return i::InitializeICU();
}
const char* v8::V8::GetVersion() {
return i::Version::GetVersion();
}
@ -6281,6 +6294,7 @@ bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
ApiCheck(!obj->is_external(),
@ -6581,6 +6595,7 @@ v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
disallow_heap_allocation_ = new i::DisallowHeapAllocation();
}
v8::AssertNoGCScope::~AssertNoGCScope() {
delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
}
@ -6644,6 +6659,7 @@ void V8::SetCounterFunction(CounterLookupCallback callback) {
isolate->stats_table()->SetCounterFunction(callback);
}
void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
@ -6652,6 +6668,7 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
isolate->counters()->ResetHistograms();
}
void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
i::Isolate* isolate = EnterIsolateIfNeeded();
if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
@ -6999,6 +7016,7 @@ String::Value::~Value() {
i::DeleteArray(str_);
}
Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "RangeError");
@ -7015,6 +7033,7 @@ Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "ReferenceError");
@ -7032,6 +7051,7 @@ Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "SyntaxError");
@ -7048,6 +7068,7 @@ Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "TypeError");
@ -7064,6 +7085,7 @@ Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
return Utils::ToLocal(result);
}
Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "Error");
@ -7437,7 +7459,7 @@ void CpuProfile::Delete() {
i::CpuProfiler* profiler = isolate->cpu_profiler();
ASSERT(profiler != NULL);
profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
if (profiler->GetProfilesCount() == 0 && !profiler->HasDetachedProfiles()) {
if (profiler->GetProfilesCount() == 0) {
// If this was the last profile, clean up all accessory data as well.
profiler->DeleteAllProfiles();
}
@ -7484,27 +7506,9 @@ int CpuProfiler::GetProfileCount() {
}
const CpuProfile* CpuProfiler::GetCpuProfile(int index,
Handle<Value> security_token) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
index));
}
const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(NULL, index));
}
const CpuProfile* CpuProfiler::FindCpuProfile(unsigned uid,
Handle<Value> security_token) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->FindProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
uid));
reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(index));
}
@ -7514,19 +7518,9 @@ void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
}
const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
Handle<Value> security_token) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
*Utils::OpenHandle(*title)));
}
const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
NULL,
*Utils::OpenHandle(*title)));
}
@ -7562,6 +7556,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
isolate->factory()->InternalizeUtf8String(edge->name()));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
case i::HeapGraphEdge::kWeak:
return ToApiHandle<Number>(
isolate->factory()->NewNumberFromInt(edge->index()));
default: UNREACHABLE();
@ -7808,6 +7803,7 @@ void Testing::SetStressRunType(Testing::StressType type) {
internal::Testing::set_stress_type(type);
}
int Testing::GetStressRuns() {
if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
#ifdef DEBUG

2  deps/v8/src/apinatives.js

@ -37,7 +37,7 @@ function CreateDate(time) {
}
var kApiFunctionCache = {};
var kApiFunctionCache = new InternalArray();
var functionCache = kApiFunctionCache;

251  deps/v8/src/arm/assembler-arm.cc

@ -49,6 +49,7 @@ bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cache_line_size_ = 64;
ExternalReference ExternalReference::cpu_features() {
@ -56,6 +57,7 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
@ -124,6 +126,9 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << VFP3 |
static_cast<uint64_t>(1) << ARMv7;
}
if (FLAG_enable_neon) {
supported_ |= 1u << NEON;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= static_cast<uint64_t>(1) << ARMv7;
@ -156,6 +161,10 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << ARMv7;
}
if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
found_by_runtime_probing_only_ |= 1u << NEON;
}
if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
@ -170,12 +179,18 @@ void CpuFeatures::Probe() {
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
CpuImplementer implementer = OS::GetCpuImplementer();
if (implementer == QUALCOMM_IMPLEMENTER &&
FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
CpuPart part = OS::GetCpuPart(implementer);
if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
cache_line_size_ = 32;
}
if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
&& OS::ArmCpuHasFeature(VFP32DREGS)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
@ -246,11 +261,12 @@ void CpuFeatures::PrintTarget() {
void CpuFeatures::PrintFeatures() {
printf(
"ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
"ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
"MOVW_MOVT_IMMEDIATE_LOADS=%d",
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(NEON),
CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
@ -356,6 +372,7 @@ MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
am_ = am;
}
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
rn_ = rn;
rm_ = rm;
@ -376,6 +393,66 @@ MemOperand::MemOperand(Register rn, Register rm,
}
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
ASSERT((am == Offset) || (am == PostIndex));
rn_ = rn;
rm_ = (am == Offset) ? pc : sp;
SetAlignment(align);
}
NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
rn_ = rn;
rm_ = rm;
SetAlignment(align);
}
void NeonMemOperand::SetAlignment(int align) {
switch (align) {
case 0:
align_ = 0;
break;
case 64:
align_ = 1;
break;
case 128:
align_ = 2;
break;
case 256:
align_ = 3;
break;
default:
UNREACHABLE();
align_ = 0;
break;
}
}
NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
base_ = base;
switch (registers_count) {
case 1:
type_ = nlt_1;
break;
case 2:
type_ = nlt_2;
break;
case 3:
type_ = nlt_3;
break;
case 4:
type_ = nlt_4;
break;
default:
UNREACHABLE();
type_ = nlt_1;
break;
}
}
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@ -677,6 +754,7 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
return instr & kOff12Mask;
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@ -1543,6 +1621,107 @@ void Assembler::bfi(Register dst,
}
void Assembler::pkhbt(Register dst,
Register src1,
const Operand& src2,
Condition cond ) {
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
ASSERT(!dst.is(pc));
ASSERT(!src1.is(pc));
ASSERT(!src2.rm().is(pc));
ASSERT(!src2.rm().is(no_reg));
ASSERT(src2.rs().is(no_reg));
ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
ASSERT(src2.shift_op() == LSL);
emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
src2.shift_imm_*B7 | B4 | src2.rm().code());
}
void Assembler::pkhtb(Register dst,
Register src1,
const Operand& src2,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
ASSERT(!dst.is(pc));
ASSERT(!src1.is(pc));
ASSERT(!src2.rm().is(pc));
ASSERT(!src2.rm().is(no_reg));
ASSERT(src2.rs().is(no_reg));
ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
ASSERT(src2.shift_op() == ASR);
int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
asr*B7 | B6 | B4 | src2.rm().code());
}
void Assembler::uxtb(Register dst,
const Operand& src,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.274.
// cond(31-28) | 01101110(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
ASSERT(!dst.is(pc));
ASSERT(!src.rm().is(pc));
ASSERT(!src.rm().is(no_reg));
ASSERT(src.rs().is(no_reg));
ASSERT((src.shift_imm_ == 0) ||
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
ASSERT(src.shift_op() == ROR);
emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
void Assembler::uxtab(Register dst,
Register src1,
const Operand& src2,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.271.
// cond(31-28) | 01101110(27-20) | Rn(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
ASSERT(!dst.is(pc));
ASSERT(!src1.is(pc));
ASSERT(!src2.rm().is(pc));
ASSERT(!src2.rm().is(no_reg));
ASSERT(src2.rs().is(no_reg));
ASSERT((src2.shift_imm_ == 0) ||
(src2.shift_imm_ == 8) ||
(src2.shift_imm_ == 16) ||
(src2.shift_imm_ == 24));
ASSERT(src2.shift_op() == ROR);
emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
void Assembler::uxtb16(Register dst,
const Operand& src,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.275.
// cond(31-28) | 01101100(27-20) | 1111(19-16) |
// Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
ASSERT(!dst.is(pc));
ASSERT(!src.rm().is(pc));
ASSERT(!src.rm().is(no_reg));
ASSERT(src.rs().is(no_reg));
ASSERT((src.shift_imm_ == 0) ||
(src.shift_imm_ == 8) ||
(src.shift_imm_ == 16) ||
(src.shift_imm_ == 24));
ASSERT(src.shift_op() == ROR);
emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
@ -1640,6 +1819,26 @@ void Assembler::strd(Register src1, Register src2,
addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
// Preload instructions.
void Assembler::pld(const MemOperand& address) {
// Instruction details available in ARM DDI 0406C.b, A8.8.128.
// 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
// 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
ASSERT(address.rm().is(no_reg));
ASSERT(address.am() == Offset);
int U = B23;
int offset = address.offset();
if (offset < 0) {
offset = -offset;
U = 0;
}
ASSERT(offset < 4096);
emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
0xf*B12 | offset);
}
// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
Register base,
@ -2074,6 +2273,7 @@ void Assembler::vstm(BlockAddrMode am,
0xA*B8 | count);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
OS::MemCopy(&i, &d, 8);
@ -2082,6 +2282,7 @@ static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
*hi = i >> 32;
}
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
@ -2701,6 +2902,50 @@ void Assembler::vsqrt(const DwVfpRegister dst,
}
// Support for NEON.
void Assembler::vld1(NeonSize size,
const NeonListOperand& dst,
const NeonMemOperand& src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.320.
// 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
// Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
ASSERT(CpuFeatures::IsSupported(NEON));
int vd, d;
dst.base().split_code(&vd, &d);
emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}
void Assembler::vst1(NeonSize size,
const NeonListOperand& src,
const NeonMemOperand& dst) {
// Instruction details available in ARM DDI 0406C.b, A8.8.404.
// 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
// Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
ASSERT(CpuFeatures::IsSupported(NEON));
int vd, d;
src.base().split_code(&vd, &d);
emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
size*B6 | dst.align()*B4 | dst.rm().code());
}
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.346.
// 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
// 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
ASSERT(CpuFeatures::IsSupported(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
(dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
// Pseudo instructions.
void Assembler::nop(int type) {
// ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
@ -2774,6 +3019,7 @@ void Assembler::RecordConstPool(int size) {
#endif
}
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
@ -2894,6 +3140,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
void Assembler::RecordRelocInfo(double data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(pc_, data);

113  deps/v8/src/arm/assembler-arm.h

@ -78,12 +78,15 @@ class CpuFeatures : public AllStatic {
(!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f)));
}
static unsigned cache_line_size() { return cache_line_size_; }
private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned found_by_runtime_probing_only_;
static unsigned cache_line_size_;
friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
@ -301,6 +304,36 @@ struct DwVfpRegister {
typedef DwVfpRegister DoubleRegister;
// Quad word NEON register.
struct QwNeonRegister {
static const int kMaxNumRegisters = 16;
static QwNeonRegister from_code(int code) {
QwNeonRegister r = { code };
return r;
}
bool is_valid() const {
return (0 <= code_) && (code_ < kMaxNumRegisters);
}
bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
return code_;
}
void split_code(int* vm, int* m) const {
ASSERT(is_valid());
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
}
int code_;
};
typedef QwNeonRegister QuadRegister;
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
const SwVfpRegister s0 = { 0 };
@ -370,6 +403,23 @@ const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
const QwNeonRegister q0 = { 0 };
const QwNeonRegister q1 = { 1 };
const QwNeonRegister q2 = { 2 };
const QwNeonRegister q3 = { 3 };
const QwNeonRegister q4 = { 4 };
const QwNeonRegister q5 = { 5 };
const QwNeonRegister q6 = { 6 };
const QwNeonRegister q7 = { 7 };
const QwNeonRegister q8 = { 8 };
const QwNeonRegister q9 = { 9 };
const QwNeonRegister q10 = { 10 };
const QwNeonRegister q11 = { 11 };
const QwNeonRegister q12 = { 12 };
const QwNeonRegister q13 = { 13 };
const QwNeonRegister q14 = { 14 };
const QwNeonRegister q15 = { 15 };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
@ -562,6 +612,42 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
// Class NeonMemOperand represents a memory operand in load and
// store NEON instructions
class NeonMemOperand BASE_EMBEDDED {
public:
// [rn {:align}] Offset
// [rn {:align}]! PostIndex
explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
// [rn {:align}], rm PostIndex
explicit NeonMemOperand(Register rn, Register rm, int align = 0);
Register rn() const { return rn_; }
Register rm() const { return rm_; }
int align() const { return align_; }
private:
void SetAlignment(int align);
Register rn_; // base
Register rm_; // register increment
int align_;
};
// Class NeonListOperand represents a list of NEON registers
class NeonListOperand BASE_EMBEDDED {
public:
explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
DoubleRegister base() const { return base_; }
NeonListType type() const { return type_; }
private:
DoubleRegister base_;
NeonListType type_;
};
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@ -866,6 +952,19 @@ class Assembler : public AssemblerBase {
void bfi(Register dst, Register src, int lsb, int width,
Condition cond = al);
void pkhbt(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void pkhtb(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void uxtb(Register dst, const Operand& src, Condition cond = al);
void uxtab(Register dst, Register src1, const Operand& src2,
Condition cond = al);
void uxtb16(Register dst, const Operand& src, Condition cond = al);
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
@ -887,6 +986,9 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);
// Preload instructions
void pld(const MemOperand& address);
// Load/Store multiple instructions
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
@ -1097,6 +1199,17 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
const Condition cond = al);
// Support for NEON.
// All these APIs support D0 to D31 and Q0 to Q15.
void vld1(NeonSize size,
const NeonListOperand& dst,
const NeonMemOperand& src);
void vst1(NeonSize size,
const NeonListOperand& src,
const NeonMemOperand& dst);
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
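
To show how the new NEON entry points fit together, here is an illustrative snippet in the style of this commit's codegen changes (not copied from the patch); it assumes the NeonSize/NeonDataType enumerators (Neon8, Neon16, NeonU8) added to constants-arm.h, which are not shown above, and an Assembler* named masm with NEON support already probed:

ASSERT(CpuFeatures::IsSupported(NEON));
// Load eight bytes from [r0] into d0, post-incrementing r0 afterwards.
masm->vld1(Neon8, NeonListOperand(d0), NeonMemOperand(r0, PostIndex));
// Widen the eight unsigned bytes into eight 16-bit lanes in q0 (d0:d1).
masm->vmovl(NeonU8, q0, d0);
// Store the sixteen bytes of widened data to [r1], again post-incrementing.
masm->vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(r1, PostIndex));
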

1  deps/v8/src/arm/builtins-arm.cc

@ -200,7 +200,6 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
r3, // Scratch.
r4, // Scratch.
r5, // Scratch.
false, // Is it a Smi?
&not_cached);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);

506  deps/v8/src/arm/code-stubs-arm.cc

@ -60,6 +60,16 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r2 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@ -226,8 +236,42 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
void UnaryOpStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(UnaryOpIC_Miss);
}
void StoreGlobalStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r1, r2, r0 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}
void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0, r3, r1, r2 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cond);
@ -892,17 +936,10 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Now that we have the types we might as well check for
// internalized-internalized.
Label not_internalized;
STATIC_ASSERT(kInternalizedTag != 0);
__ and_(r2, r2, Operand(kIsNotStringMask | kIsInternalizedMask));
__ cmp(r2, Operand(kInternalizedTag | kStringTag));
__ b(ne, &not_internalized); // r2 (rhs) is not an internalized string
__ and_(r3, r3, Operand(kIsNotStringMask | kIsInternalizedMask));
__ cmp(r3, Operand(kInternalizedTag | kStringTag));
__ b(eq, &return_not_equal); // both rhs and lhs are internalized strings
__ bind(&not_internalized);
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ orr(r2, r2, Operand(r3));
__ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
__ b(eq, &return_not_equal);
}
@ -943,15 +980,15 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
// r2 is object type of rhs.
Label object_test;
STATIC_ASSERT(kInternalizedTag != 0);
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
__ tst(r2, Operand(kIsInternalizedMask));
__ b(eq, possible_strings);
__ tst(r2, Operand(kIsNotInternalizedMask));
__ b(ne, possible_strings);
__ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
__ tst(r3, Operand(kIsInternalizedMask));
__ b(eq, possible_strings);
__ tst(r3, Operand(kIsNotInternalizedMask));
__ b(ne, possible_strings);
// Both are internalized. We already checked they weren't the same pointer
// so they are not equal.
@ -982,7 +1019,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found) {
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
@ -1005,7 +1041,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Isolate* isolate = masm->isolate();
Label is_smi;
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
__ CheckMap(object,
scratch1,
@ -1038,7 +1073,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ VFPCompareAndSetFlags(d0, d1);
__ b(ne, not_found); // The cache did not contain this value.
__ b(&load_result_from_cache);
}
__ bind(&is_smi);
Register scratch = scratch1;
@ -1050,7 +1084,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Operand(scratch, LSL, kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
Register probe = mask;
__ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(object, probe);
__ b(ne, not_found);
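For orientation, the smi fast path kept here probes v8's number string cache, a FixedArray of (number, string) pairs indexed by a masked hash of the number. An illustrative-only sketch of that probe (the helper name and exact layout details are assumptions, not code from this patch):

// Sketch of the smi probe; each entry occupies two consecutive FixedArray slots.
static bool ProbeNumberStringCache(FixedArray* cache, int mask,
                                   Smi* smi_key, Object** result) {
  int index = (smi_key->value() & mask) * 2;     // key slot of the pair
  if (cache->get(index) != smi_key) return false;  // cache miss
  *result = cache->get(index + 1);               // the cached string
  return true;
}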
@ -1072,7 +1105,7 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
__ add(sp, sp, Operand(1 * kPointerSize));
__ Ret();
@ -1289,277 +1322,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
stream->Add("UnaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
UnaryOpIC::GetName(operand_type_));
}
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
switch (operand_type_) {
case UnaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case UnaryOpIC::NUMBER:
GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
break;
}
}
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ mov(r3, Operand(r0)); // the operand
__ mov(r2, Operand(Smi::FromInt(op_)));
__ mov(r1, Operand(Smi::FromInt(mode_)));
__ mov(r0, Operand(Smi::FromInt(operand_type_)));
__ Push(r3, r2, r1, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
GenerateSmiStubSub(masm);
break;
case Token::BIT_NOT:
GenerateSmiStubBitNot(masm);
break;
default:
UNREACHABLE();
}
}
void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeSub(masm, &non_smi, &slow);
__ bind(&non_smi);
__ bind(&slow);
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
Label non_smi;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
Label* non_smi,
Label* slow) {
__ JumpIfNotSmi(r0, non_smi);
// The result of negating zero or the smallest negative smi is not a smi.
__ bic(ip, r0, Operand(0x80000000), SetCC);
__ b(eq, slow);
// Return '0 - value'.
__ rsb(r0, r0, Operand::Zero());
__ Ret();
}
void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
Label* non_smi) {
__ JumpIfNotSmi(r0, non_smi);
// Flip bits and revert inverted smi-tag.
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask));
__ Ret();
}
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
}
}
void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&slow);
GenerateTypeTransition(masm);
__ bind(&call_builtin);
GenerateGenericCodeFallback(masm);
}
void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
GenerateTypeTransition(masm);
}
void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
Label* slow) {
EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
// r0 is a heap number. Get a new heap number in r1.
if (mode_ == UNARY_OVERWRITE) {
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
Label slow_allocate_heapnumber, heapnumber_allocated;
__ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ mov(r1, Operand(r0));
__ pop(r0);
}
__ bind(&heapnumber_allocated);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
__ mov(r0, Operand(r1));
}
__ Ret();
}
void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
Label* slow) {
EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
// Convert the heap number in r0 to an untagged integer in r1.
__ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ ECMAToInt32(r1, d0, r2, r3, r4, d1);
// Do the bitwise operation and check if the result fits in a smi.
Label try_float;
__ mvn(r1, Operand(r1));
__ cmn(r1, Operand(0x40000000));
__ b(mi, &try_float);
// Tag the result as a smi and we're done.
__ SmiTag(r0, r1);
__ Ret();
// Try to store the result in a heap number.
__ bind(&try_float);
if (mode_ == UNARY_NO_OVERWRITE) {
Label slow_allocate_heapnumber, heapnumber_allocated;
__ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the lower bit of the result (left shifted to look like a smi).
__ mov(r2, Operand(r1, LSL, 31));
// Push the 31 high bits (bit 0 cleared to look like a smi).
__ bic(r1, r1, Operand(1));
__ Push(r2, r1);
__ CallRuntime(Runtime::kNumberAlloc, 0);
__ Pop(r2, r1); // Restore the result.
__ orr(r1, r1, Operand(r2, LSR, 31));
}
__ bind(&heapnumber_allocated);
}
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ Ret();
}
// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
GenerateGenericStubSub(masm);
break;
case Token::BIT_NOT:
GenerateGenericStubBitNot(masm);
break;
default:
UNREACHABLE();
}
}
void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeSub(masm, &non_smi, &slow);
__ bind(&non_smi);
GenerateHeapNumberCodeSub(masm, &slow);
__ bind(&slow);
GenerateGenericCodeFallback(masm);
}
void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
GenerateHeapNumberCodeBitNot(masm, &slow);
__ bind(&slow);
GenerateGenericCodeFallback(masm);
}
void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
// Handle the slow case by jumping to the JavaScript builtin.
__ push(r0);
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
break;
case Token::BIT_NOT:
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
// Generates code to call a C function to do a double operation.
// This code never falls through, but returns with a heap number containing
// the result in r0.
@ -2135,8 +1897,8 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
StringAddStub string_add_stub((StringAddFlags)
(ERECT_FRAME | NO_STRING_CHECK_IN_STUB));
StringAddStub string_add_stub(
(StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
@ -2494,8 +2256,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &left_not_string);
StringAddStub string_add_left_stub((StringAddFlags)
(ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB));
StringAddStub string_add_left_stub(
(StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
@ -2505,8 +2267,8 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
StringAddStub string_add_right_stub((StringAddFlags)
(ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB));
StringAddStub string_add_right_stub(
(StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
@ -3001,6 +2763,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
}
@ -3640,7 +3403,8 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@ -3671,7 +3435,8 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
support_wrapper_);
__ bind(&miss);
StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@ -3741,7 +3506,8 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
__ bind(&miss);
StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind()));
StubCompiler::TailCallBuiltin(
masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}
@ -4649,20 +4415,17 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// function without changing the state.
__ cmp(r3, r1);
__ b(eq, &done);
__ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &done);
// Special handling of the Array() function, which caches not only the
// monomorphic Array function but the initial ElementsKind with special
// sentinels
__ JumpIfNotSmi(r3, &miss);
if (FLAG_debug_code) {
Handle<Object> terminal_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
LAST_FAST_ELEMENTS_KIND);
__ cmp(r3, Operand(terminal_kind_sentinel));
__ Assert(le, "Array function sentinel is not an ElementsKind");
}
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
AllocationSite. Do a map check on the object in r3.
Handle<Map> allocation_site_map(
masm->isolate()->heap()->allocation_site_map(),
masm->isolate());
__ ldr(r5, FieldMemOperand(r3, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &miss);
// Make sure the function is the Array() function
__ LoadArrayFunction(r3);
@ -4691,14 +4454,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ cmp(r1, r3);
__ b(ne, &not_array_function);
// The target function is the Array constructor, install a sentinel value in
// the constructor's type info cell that will track the initial ElementsKind
// that should be used for the array when its constructed.
Handle<Object> initial_kind_sentinel =
TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
GetInitialFastElementsKind());
__ mov(r3, Operand(initial_kind_sentinel));
__ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
// The target function is the Array constructor,
// Create an AllocationSite if we don't already have it, store it in the cell
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r0);
__ push(r1);
__ push(r2);
CreateAllocationSiteStub create_stub;
__ CallStub(&create_stub);
__ pop(r2);
__ pop(r1);
__ pop(r0);
}
__ b(&done);
__ bind(&not_array_function);
@ -5723,7 +5494,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
if ((flags_ & NO_STRING_ADD_FLAGS) != 0) {
// Otherwise, at least one of the arguments is definitely a string,
// and we convert the one that is not known to be a string.
if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
__ JumpIfEitherSmi(r0, r1, &call_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@ -5735,21 +5510,17 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &call_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
} else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
GenerateConvertArgument(
masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
builtin_id = Builtins::STRING_ADD_RIGHT;
} else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
} else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
GenerateConvertArgument(
masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
builtin_id = Builtins::STRING_ADD_LEFT;
}
}
// Both arguments are strings.
// r0: first string
@ -5796,7 +5567,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(ne, &longer_than_two);
// Check that both strings are non-external ASCII strings.
if (flags_ != NO_STRING_ADD_FLAGS) {
if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@ -5844,7 +5615,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ASCII the result is an ASCII cons string.
if (flags_ != NO_STRING_ADD_FLAGS) {
if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@ -5927,7 +5698,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r6: sum of lengths.
Label first_prepared, second_prepared;
__ bind(&string_add_flat_result);
if (flags_ != NO_STRING_ADD_FLAGS) {
if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@ -6015,7 +5786,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
if ((flags_ & ERECT_FRAME) != 0) {
if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm);
// Build a frame
{
@ -6030,7 +5801,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
if ((flags_ & ERECT_FRAME) != 0) {
if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
GenerateRegisterArgsPop(masm);
// Build a frame
{
@ -6082,7 +5853,6 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
scratch2,
scratch3,
scratch4,
false,
&not_cached);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
@ -6223,14 +5993,9 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp1, tmp1, Operand(kIsNotStringMask | kIsInternalizedMask));
__ cmp(tmp1, Operand(kInternalizedTag | kStringTag));
__ b(ne, &miss);
__ and_(tmp2, tmp2, Operand(kIsNotStringMask | kIsInternalizedMask));
__ cmp(tmp2, Operand(kInternalizedTag | kStringTag));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ orr(tmp1, tmp1, Operand(tmp2));
__ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
__ b(ne, &miss);
// Internalized strings are compared by identity.
@ -6264,7 +6029,6 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
STATIC_ASSERT(kInternalizedTag != 0);
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
@ -6330,13 +6094,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// strings.
if (equality) {
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp3, tmp1, Operand(tmp2));
__ tst(tmp3, Operand(kIsInternalizedMask));
STATIC_ASSERT(kInternalizedTag == 0);
__ orr(tmp3, tmp1, Operand(tmp2));
__ tst(tmp3, Operand(kIsNotInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
__ Ret(ne);
__ Ret(eq);
}
// Check that both strings are sequential ASCII.
@ -6711,6 +6475,7 @@ struct AheadOfTimeWriteBarrierStubList {
RememberedSetAction action;
};
#define REG(Name) { kRegister_ ## Name ## _Code }
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
@ -7181,10 +6946,6 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
ASSERT(FAST_DOUBLE_ELEMENTS == 4);
ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
Label normal_sequence;
@ -7196,18 +6957,19 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
__ b(eq, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry
// Fix kind and retry (only if we have an allocation site in the cell).
__ add(r3, r3, Operand(1));
__ cmp(r2, Operand(undefined_sentinel));
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &normal_sequence);
// The type cell may have gone megamorphic, don't overwrite if so
__ ldr(r5, FieldMemOperand(r2, kPointerSize));
__ JumpIfNotSmi(r5, &normal_sequence);
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r5, FieldMemOperand(r5, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &normal_sequence);
// Save the resulting elements kind in type info
__ SmiTag(r3);
__ str(r3, FieldMemOperand(r2, kPointerSize));
__ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
__ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
__ bind(&normal_sequence);
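The "fix kind and retry" step relies on the ElementsKind numbering in which the holey variant of each fast kind is the next enum value (see the ASSERTs at the top of this function), so switching to the holey kind is just an increment. A small sketch of that invariant, for reference only:

// From the kind numbering asserted above (FAST_* kinds are even, FAST_HOLEY_* are odd):
// a kind is holey iff its low bit is set, and kind + 1 is its holey variant.
static inline bool IsHoleyKind(int kind) { return (kind & 1) != 0; }
static inline int ToHoleyKind(int kind) { return IsHoleyKind(kind) ? kind : kind + 1; }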
@ -7236,7 +6998,7 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
@ -7277,10 +7039,6 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@ -7296,7 +7054,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// We should either have undefined in r2 or a valid cell
Label okay_here;
Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
__ cmp(r2, Operand(undefined_sentinel));
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &okay_here);
__ ldr(r3, FieldMemOperand(r2, 0));
__ cmp(r3, Operand(cell_map));
@ -7306,10 +7064,20 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
Label no_info, switch_ready;
// Get the elements kind and case on that.
__ cmp(r2, Operand(undefined_sentinel));
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
__ JumpIfNotSmi(r3, &no_info);
// The type cell may have undefined in its value.
__ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
// The type cell has either an AllocationSite or a JSFunction
__ ldr(r4, FieldMemOperand(r3, 0));
__ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &no_info);
__ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(r3);
__ jmp(&switch_ready);
__ bind(&no_info);

81  deps/v8/src/arm/code-stubs-arm.h

@ -80,71 +80,6 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
};
class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(operand_type) {
}
private:
Token::Value op_;
UnaryOverwriteMode mode_;
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
Major MajorKey() { return UnaryOp; }
int MinorKey() {
return ModeBits::encode(mode_)
| OpBits::encode(op_)
| OperandTypeInfoBits::encode(operand_type_);
}
// Note: A lot of the helper functions below will vanish when we use virtual
// function instead of switch more often.
void Generate(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateSmiStubSub(MacroAssembler* masm);
void GenerateSmiStubBitNot(MacroAssembler* masm);
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
void GenerateNumberStub(MacroAssembler* masm);
void GenerateNumberStubSub(MacroAssembler* masm);
void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateGenericStubSub(MacroAssembler* masm);
void GenerateGenericStubBitNot(MacroAssembler* masm);
void GenerateGenericCodeFallback(MacroAssembler* masm);
virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
return UnaryOpIC::ToState(operand_type_);
}
virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@ -209,21 +144,6 @@ class StringHelper : public AllStatic {
};
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 1 << 0,
// Omit left string check in stub (left is definitely a string).
NO_STRING_CHECK_LEFT_IN_STUB = 1 << 1,
// Omit right string check in stub (right is definitely a string).
NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 2,
// Stub needs a frame before calling the runtime
ERECT_FRAME = 1 << 3,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
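This platform-local StringAddFlags enum goes away in favor of the shared flags used throughout the .cc changes above (STRING_ADD_CHECK_NONE/LEFT/RIGHT/BOTH and STRING_ADD_ERECT_FRAME, presumably now defined in the common code-stubs.h). A sketch of the replacement, with bit values assumed rather than taken from this patch:

// Assumed layout; only the relationships used by the stub code are certain:
// CHECK_BOTH is CHECK_LEFT | CHECK_RIGHT, and ERECT_FRAME is an independent bit.
enum StringAddFlags {
  STRING_ADD_CHECK_NONE  = 0,
  STRING_ADD_CHECK_LEFT  = 1 << 0,  // left operand still needs a string check
  STRING_ADD_CHECK_RIGHT = 1 << 1,  // right operand still needs a string check
  STRING_ADD_CHECK_BOTH  = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
  STRING_ADD_ERECT_FRAME = 1 << 2   // stub must build a frame before calling out
};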
class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@ -352,7 +272,6 @@ class NumberToStringStub: public PlatformCodeStub {
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found);
private:

261  deps/v8/src/arm/codegen-arm.cc

@ -112,6 +112,252 @@ UnaryMathFunction CreateExpFunction() {
#endif
}
#if defined(V8_HOST_ARCH_ARM)
OS::MemCopyUint8Function CreateMemCopyUint8Function(
OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
return stub;
}
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
Register dest = r0;
Register src = r1;
Register chars = r2;
Register temp1 = r3;
Label less_4;
if (CpuFeatures::IsSupported(NEON)) {
Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
Label size_less_than_8;
__ pld(MemOperand(src, 0));
__ cmp(chars, Operand(8));
__ b(lt, &size_less_than_8);
__ cmp(chars, Operand(32));
__ b(lt, &less_32);
if (CpuFeatures::cache_line_size() == 32) {
__ pld(MemOperand(src, 32));
}
__ cmp(chars, Operand(64));
__ b(lt, &less_64);
__ pld(MemOperand(src, 64));
if (CpuFeatures::cache_line_size() == 32) {
__ pld(MemOperand(src, 96));
}
__ cmp(chars, Operand(128));
__ b(lt, &less_128);
__ pld(MemOperand(src, 128));
if (CpuFeatures::cache_line_size() == 32) {
__ pld(MemOperand(src, 160));
}
__ pld(MemOperand(src, 192));
if (CpuFeatures::cache_line_size() == 32) {
__ pld(MemOperand(src, 224));
}
__ cmp(chars, Operand(256));
__ b(lt, &less_256);
__ sub(chars, chars, Operand(256));
__ bind(&loop);
__ pld(MemOperand(src, 256));
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
if (CpuFeatures::cache_line_size() == 32) {
__ pld(MemOperand(src, 256));
}
__ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
__ sub(chars, chars, Operand(64), SetCC);
__ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
__ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
__ b(ge, &loop);
__ add(chars, chars, Operand(256));
__ bind(&less_256);
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
__ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
__ sub(chars, chars, Operand(128));
__ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
__ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
__ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
__ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
__ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
__ cmp(chars, Operand(64));
__ b(lt, &less_64);
__ bind(&less_128);
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
__ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
__ sub(chars, chars, Operand(64));
__ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
__ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
__ bind(&less_64);
__ cmp(chars, Operand(32));
__ b(lt, &less_32);
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
__ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
__ sub(chars, chars, Operand(32));
__ bind(&less_32);
__ cmp(chars, Operand(16));
__ b(le, &_16_or_less);
__ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
__ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
__ sub(chars, chars, Operand(16));
__ bind(&_16_or_less);
__ cmp(chars, Operand(8));
__ b(le, &_8_or_less);
__ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
__ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
__ sub(chars, chars, Operand(8));
// Do a last copy which may overlap with the previous copy (up to 8 bytes).
__ bind(&_8_or_less);
__ rsb(chars, chars, Operand(8));
__ sub(src, src, Operand(chars));
__ sub(dest, dest, Operand(chars));
__ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
__ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
__ Ret();
__ bind(&size_less_than_8);
__ bic(temp1, chars, Operand(0x3), SetCC);
__ b(&less_4, eq);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
} else {
Register temp2 = ip;
Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC);
__ b(&less_4, eq);
__ add(temp2, dest, temp2);
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
__ cmp(dest, temp2);
__ b(&loop, ne);
}
__ bind(&less_4);
__ mov(chars, Operand(chars, LSL, 31), SetCC);
// bit0 => Z (ne), bit1 => C (cs)
__ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
__ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
__ ldrb(temp1, MemOperand(src), ne);
__ strb(temp1, MemOperand(dest), ne);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
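Functionally, the generated stub is just a fast (NEON or word-at-a-time) byte copy; its contract, stated in plain C with the signature assumed from OS::MemCopyUint8Function:

#include <stddef.h>
#include <stdint.h>

// Reference semantics only; the generated code above is the optimized equivalent.
void MemCopyUint8Reference(uint8_t* dest, const uint8_t* src, size_t chars) {
  while (chars-- > 0) {
    *dest++ = *src++;
  }
}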
// Convert 8-bit characters to 16-bit. The number of characters to copy must be at least 8.
OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
OS::MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
if (Serializer::enabled() || !CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
return stub;
}
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return stub;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
Register dest = r0;
Register src = r1;
Register chars = r2;
if (CpuFeatures::IsSupported(NEON)) {
Register temp = r3;
Label loop;
__ bic(temp, chars, Operand(0x7));
__ sub(chars, chars, Operand(temp));
__ add(temp, dest, Operand(temp, LSL, 1));
__ bind(&loop);
__ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
__ vmovl(NeonU8, q0, d0);
__ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
__ cmp(dest, temp);
__ b(&loop, ne);
// Do a last copy which will overlap with the previous copy (1 to 8 bytes).
__ rsb(chars, chars, Operand(8));
__ sub(src, src, Operand(chars));
__ sub(dest, dest, Operand(chars, LSL, 1));
__ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
__ vmovl(NeonU8, q0, d0);
__ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
__ Ret();
} else {
Register temp1 = r3;
Register temp2 = ip;
Register temp3 = lr;
Register temp4 = r4;
Label loop;
Label not_two;
__ Push(lr, r4);
__ bic(temp2, chars, Operand(0x3));
__ add(temp2, dest, Operand(temp2, LSL, 1));
__ bind(&loop);
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ uxtb16(temp3, Operand(temp1, ROR, 0));
__ uxtb16(temp4, Operand(temp1, ROR, 8));
__ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
__ str(temp1, MemOperand(dest));
__ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
__ str(temp1, MemOperand(dest, 4));
__ add(dest, dest, Operand(8));
__ cmp(dest, temp2);
__ b(&loop, ne);
__ mov(chars, Operand(chars, LSL, 31), SetCC); // bit0 => ne, bit1 => cs
__ b(&not_two, cc);
__ ldrh(temp1, MemOperand(src, 2, PostIndex));
__ uxtb(temp3, Operand(temp1, ROR, 8));
__ mov(temp3, Operand(temp3, LSL, 16));
__ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
__ str(temp3, MemOperand(dest, 4, PostIndex));
__ bind(&not_two);
__ ldrb(temp1, MemOperand(src), ne);
__ strh(temp1, MemOperand(dest), ne);
__ Pop(pc, r4);
}
CodeDesc desc;
masm.GetCode(&desc);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyUint16Uint8Function>(buffer);
#endif
}
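Likewise, the 8-to-16-bit variant zero-extends each byte into a 16-bit unit; in plain C, with the signature assumed from OS::MemCopyUint16Uint8Function:

#include <stddef.h>
#include <stdint.h>

// Reference semantics only; the NEON path above uses vmovl to widen 8 bytes at a time.
void MemCopyUint16Uint8Reference(uint16_t* dest, const uint8_t* src, size_t chars) {
  for (size_t i = 0; i < chars; ++i) {
    dest[i] = src[i];
  }
}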
#endif
#undef __
@ -120,6 +366,7 @@ UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@ -144,7 +391,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, AllocationSiteMode mode,
Label* allocation_site_info_found) {
Label* allocation_memento_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@ -154,9 +401,9 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- r4 : scratch (elements)
// -----------------------------------
if (mode == TRACK_ALLOCATION_SITE) {
ASSERT(allocation_site_info_found != NULL);
__ TestJSArrayForAllocationSiteInfo(r2, r4);
__ b(eq, allocation_site_info_found);
ASSERT(allocation_memento_found != NULL);
__ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, allocation_memento_found);
}
// Set transitioned map.
@ -185,7 +432,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
__ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, fail);
}
@ -311,7 +558,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required, only_change_map;
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
__ TestJSArrayForAllocationMemento(r2, r4);
__ b(eq, fail);
}
@ -591,7 +838,7 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
patcher.masm()->nop(ip.code());
patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
initialized = true;
}

43  deps/v8/src/arm/constants-arm.h

@ -33,22 +33,6 @@
#error ARM EABI support is required.
#endif
#if defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
# define CAN_USE_ARMV7_INSTRUCTIONS 1
#ifndef CAN_USE_VFP3_INSTRUCTIONS
# define CAN_USE_VFP3_INSTRUCTIONS
#endif
#endif
// Simulator should support unaligned access by default.
#if !defined(__arm__)
# ifndef CAN_USE_UNALIGNED_ACCESSES
# define CAN_USE_UNALIGNED_ACCESSES 1
# endif
#endif
namespace v8 {
namespace internal {
@ -331,6 +315,32 @@ enum LFlag {
};
// NEON data type
enum NeonDataType {
NeonS8 = 0x1, // U = 0, imm3 = 0b001
NeonS16 = 0x2, // U = 0, imm3 = 0b010
NeonS32 = 0x4, // U = 0, imm3 = 0b100
NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
NeonDataTypeSizeMask = 0x7,
NeonDataTypeUMask = 1 << 24
};
enum NeonListType {
nlt_1 = 0x7,
nlt_2 = 0xA,
nlt_3 = 0x6,
nlt_4 = 0x2
};
enum NeonSize {
Neon8 = 0x0,
Neon16 = 0x1,
Neon32 = 0x2,
Neon64 = 0x4
};
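The NeonDataType values pack the element size into the low imm3 bits and the signedness into the U bit (bit 24), as the comments above indicate; a small decoding sketch over that encoding (illustrative only, not part of this patch):

// Decode helpers over the NeonDataType encoding defined above.
static inline int NeonElementSizeInBits(int dt) {
  switch (dt & NeonDataTypeSizeMask) {
    case 0x1: return 8;
    case 0x2: return 16;
    case 0x4: return 32;
    default:  return 0;
  }
}
static inline bool NeonIsUnsigned(int dt) {
  return (dt & NeonDataTypeUMask) != 0;
}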
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@ -573,6 +583,7 @@ class Instruction {
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int TypeValue() const { return Bits(27, 25); }
inline int SpecialValue() const { return Bits(27, 23); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);

31  deps/v8/src/arm/deoptimizer-arm.cc

@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
const int Deoptimizer::table_entry_size_ = 16;
const int Deoptimizer::table_entry_size_ = 12;
int Deoptimizer::patch_size() {
@ -465,22 +465,12 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object if possible (r3) (return
// Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
if (type() == EAGER || type() == SOFT) {
__ mov(r3, Operand::Zero());
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else if (type() == OSR) {
__ mov(r3, lr);
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ mov(r3, lr);
// Correct two words for bailout id and return address.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
}
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@ -521,13 +511,8 @@ void Deoptimizer::EntryGenerator::Generate() {
__ vstr(d0, r1, dst_offset);
}
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
if (type() == EAGER || type() == SOFT || type() == OSR) {
// Remove the bailout id and the saved registers from the stack.
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
}
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@ -636,18 +621,12 @@ void Deoptimizer::EntryGenerator::Generate() {
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries. Note that any
// registers may be still live.
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
if (type() == EAGER || type() == SOFT) {
__ nop();
} else {
// Emulate ia32 like call by pushing return address to stack.
__ push(lr);
}
__ mov(ip, Operand(i));
__ push(ip);
__ b(&done);
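Each table entry now emits the same three instructions regardless of bailout type (mov ip, #id; push ip; b done), which is where the new table_entry_size_ of 12 at the top of this file comes from; a quick arithmetic check, assuming the usual 4-byte ARM instruction size and no constant-pool expansion of the mov:

// 3 instructions per entry (mov, push, b) * 4 bytes per ARM instruction = 12 bytes,
// matching the table_entry_size_ constant changed earlier in this file.
static const int kArmInstrSize = 4;
static const int kEntryInstructions = 3;
static const int kExpectedEntrySize = kEntryInstructions * kArmInstrSize;  // 12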

230  deps/v8/src/arm/disasm-arm.cc

@ -113,6 +113,8 @@ class Decoder {
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
void FormatNeonList(int Vd, int type);
void FormatNeonMemory(int Rn, int align, int Rm);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@ -133,6 +135,8 @@ class Decoder {
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeSpecialCondition(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
@ -187,11 +191,13 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(VFPRegisters::Name(reg, false));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(VFPRegisters::Name(reg, true));
@ -417,6 +423,41 @@ int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
}
void Decoder::FormatNeonList(int Vd, int type) {
if (type == nlt_1) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d}", Vd);
} else if (type == nlt_2) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d}", Vd, Vd + 1);
} else if (type == nlt_3) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
} else if (type == nlt_4) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
}
}
void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"[r%d", Rn);
if (align != 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
":%d", (1 << align) << 6);
}
if (Rm == 15) {
Print("]");
} else if (Rm == 13) {
Print("]!");
} else {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"], r%d", Rm);
}
}
// Print the movw or movt instruction.
void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
@ -980,15 +1021,107 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
}
case ia_x: {
if (instr->HasW()) {
VERIFY(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
if (instr->Bit(4) == 0) {
Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
} else {
if (instr->Bit(5) == 0) {
switch (instr->Bits(22, 21)) {
case 0:
if (instr->Bit(20) == 0) {
if (instr->Bit(6) == 0) {
Format(instr, "pkhbt'cond 'rd, 'rn, 'rm, lsl #'imm05@07");
} else {
if (instr->Bits(11, 7) == 0) {
Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #32");
} else {
Format(instr, "pkhtb'cond 'rd, 'rn, 'rm, asr #'imm05@07");
}
}
} else {
UNREACHABLE();
}
break;
case 1:
UNREACHABLE();
break;
case 2:
UNREACHABLE();
break;
case 3:
Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
break;
}
} else {
switch (instr->Bits(22, 21)) {
case 0:
UNREACHABLE();
break;
case 1:
UNREACHABLE();
break;
case 2:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #0");
break;
case 1:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtb16'cond 'rd, 'rm, ror #24");
break;
}
} else {
UNREACHABLE(); // SSAT.
UNREACHABLE();
}
} else {
Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
UNREACHABLE();
}
break;
case 3:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
if (instr->Bits(19, 16) == 0xF) {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtb'cond 'rd, 'rm, ror #0");
break;
case 1:
Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
break;
}
} else {
switch (instr->Bits(11, 10)) {
case 0:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #0");
break;
case 1:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
break;
case 2:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
break;
case 3:
Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
break;
}
}
} else {
UNREACHABLE();
}
break;
}
}
}
break;
}
@ -1421,6 +1554,91 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
} else {
Unknown(instr);
}
break;
case 7:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
} else {
Unknown(instr);
}
break;
case 8:
if (instr->Bits(21, 20) == 0) {
// vst1
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Rn = instr->VnValue();
int type = instr->Bits(11, 8);
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"vst1.%d ", (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
} else if (instr->Bits(21, 20) == 2) {
// vld1
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Rn = instr->VnValue();
int type = instr->Bits(11, 8);
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"vld1.%d ", (1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
} else {
Unknown(instr);
}
break;
case 0xA:
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
int Rn = instr->Bits(19, 16);
int offset = instr->Bits(11, 0);
if (offset == 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d]", Rn);
} else if (instr->Bit(23) == 0) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #-%d]", Rn, offset);
} else {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #+%d]", Rn, offset);
}
} else {
Unknown(instr);
}
break;
default:
Unknown(instr);
break;
}
}
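Together with FormatNeonList and FormatNeonMemory above, these cases let the disassembler print the new NEON and preload instructions instead of reporting them as unknown. Illustrative examples of the output, assembled by hand from the format strings above rather than taken from a real run:

  vmovl.s8 q1, d2
  vmovl.u8 q0, d4
  vst1.8 {d0, d1, d2, d3}, [r0], r2
  vld1.8 {d0, d1, d2, d3}, [r1]!
  pld [r2, #+64]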
#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
@ -1447,7 +1665,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
Unknown(instr);
DecodeSpecialCondition(instr);
return Instruction::kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));

16  deps/v8/src/arm/full-codegen-arm.cc

@ -169,9 +169,7 @@ void FullCodeGenerator::Generate() {
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ nop(ip.code());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
@ -181,10 +179,13 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < locals_count; i++) {
__ push(ip);
}
}
}
bool function_in_register = true;
@ -3718,7 +3719,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
StringAddStub stub(STRING_ADD_CHECK_BOTH);
__ CallStub(&stub);
context()->Plug(r0);
}
@ -4366,10 +4367,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
const char* comment) {
// TODO(svenpanne): Allowing format strings in Comment would be nice here...
Comment cmt(masm_, comment);
bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
UnaryOverwriteMode overwrite =
can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
UnaryOpStub stub(expr->op(), overwrite);
UnaryOpStub stub(expr->op());
// UnaryOpStub expects the argument to be in the
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
@ -4438,7 +4436,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
if (ShouldInlineSmiCase(expr->op())) {
__ JumpIfSmi(r0, &no_conversion);
}
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);

21  deps/v8/src/arm/ic-arm.cc

@ -325,9 +325,9 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
// bit test is enough.
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag != 0);
__ tst(hash, Operand(kIsInternalizedMask));
__ b(eq, not_unique);
STATIC_ASSERT(kInternalizedTag == 0);
__ tst(hash, Operand(kIsNotInternalizedMask));
__ b(ne, not_unique);
__ bind(&unique);
}
@ -1230,7 +1230,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(r0, r2);
@ -1253,7 +1253,7 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
AllocationSiteMode mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(r0, r2);
@ -1384,7 +1384,7 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -1398,7 +1398,7 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -1414,7 +1414,7 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1531,8 +1531,9 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
Code::Flags flags = Code::ComputeFlags(
Code::STUB, MONOMORPHIC, strict_mode,
Code::NORMAL, Code::STORE_IC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5, r6);

72  deps/v8/src/arm/lithium-arm.cc

@ -182,7 +182,7 @@ void LBranch::PrintDataTo(StringStream* stream) {
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
@ -272,6 +272,24 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
ExternalReference LLinkObjectInList::GetReference(Isolate* isolate) {
switch (hydrogen()->known_list()) {
case HLinkObjectInList::ALLOCATION_SITE_LIST:
return ExternalReference::allocation_sites_list_address(isolate);
}
UNREACHABLE();
// Return a dummy value
return ExternalReference::isolate_address(isolate);
}
void LLinkObjectInList::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" offset %d", hydrogen()->store_field().offset());
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@ -325,7 +343,6 @@ void LCallNewArray::PrintDataTo(StringStream* stream) {
stream->Add("= ");
constructor()->PrintTo(stream);
stream->Add(" #%d / ", arity());
ASSERT(hydrogen()->property_cell()->value()->IsSmi());
ElementsKind kind = hydrogen()->elements_kind();
stream->Add(" (%s) ", ElementsKindToString(kind));
}
@ -1685,8 +1702,8 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
HCompareNumericAndBranch* instr) {
Representation r = instr->representation();
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
@ -1694,14 +1711,14 @@ LInstruction* LChunkBuilder::DoCompareIDAndBranch(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
return new(zone()) LCompareNumericAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
return new(zone()) LCompareNumericAndBranch(left, right);
}
}
@ -1998,6 +2015,18 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
return new(zone())
LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new(zone()) LCheckInstanceType(value);
@ -2102,6 +2131,13 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
}
LInstruction* LChunkBuilder::DoLinkObjectInList(HLinkObjectInList* instr) {
LOperand* object = UseRegister(instr->value());
LLinkObjectInList* result = new(zone()) LLinkObjectInList(object);
return result;
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@ -2389,14 +2425,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
info()->MarkAsDeferredCalling();
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@ -2419,14 +2447,6 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* object = UseFixed(instr->object(), r0);
LOperand* key = UseFixed(instr->key(), r1);
LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
ASSERT(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
@ -2599,14 +2619,6 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* object = UseRegisterAtStart(instr->object());
LIn* result = new(zone()) LIn(key, object);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
LOperand* object = UseFixed(instr->enumerable(), r0);
LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);

89  deps/v8/src/arm/lithium-arm.h

@ -49,7 +49,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@ -81,7 +80,7 @@ class LCodeGen;
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CmpConstantEqAndBranch) \
V(CmpIDAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
@ -92,7 +91,6 @@ class LCodeGen;
V(Context) \
V(DebugBreak) \
V(DeclareGlobals) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
@ -106,7 +104,6 @@ class LCodeGen;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstanceSize) \
@ -118,10 +115,12 @@ class LCodeGen;
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsNumberAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(Label) \
V(LazyBailout) \
V(LinkObjectInList) \
V(LoadContextSlot) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
@ -719,9 +718,9 @@ class LDebugBreak: public LTemplateInstruction<0, 0, 0> {
};
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
class LCompareNumericAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
@ -729,8 +728,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
"compare-numeric-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@ -925,6 +925,19 @@ class LIsObjectAndBranch: public LControlInstruction<1, 1> {
};
class LIsNumberAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNumberAndBranch(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch)
};
class LIsStringAndBranch: public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@ -1671,6 +1684,23 @@ class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
};
class LLinkObjectInList: public LTemplateInstruction<0, 1, 0> {
public:
explicit LLinkObjectInList(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
ExternalReference GetReference(Isolate* isolate);
DECLARE_CONCRETE_INSTRUCTION(LinkObjectInList, "link-object-in-list")
DECLARE_HYDROGEN_ACCESSOR(LinkObjectInList)
virtual void PrintDataTo(StringStream* stream);
};
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@ -2450,21 +2480,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
public:
LAllocateObject(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
temps_[1] = temp2;
}
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
@ -2551,20 +2566,6 @@ class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
};
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* object, LOperand* key) {
inputs_[0] = object;
inputs_[1] = key;
}
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
};
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry() {}
@ -2586,20 +2587,6 @@ class LStackCheck: public LTemplateInstruction<0, 0, 0> {
};
class LIn: public LTemplateInstruction<1, 2, 0> {
public:
LIn(LOperand* key, LOperand* object) {
inputs_[0] = key;
inputs_[1] = object;
}
LOperand* key() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {

192
deps/v8/src/arm/lithium-codegen-arm.cc

@ -161,9 +161,7 @@ bool LCodeGen::GeneratePrologue() {
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ nop(ip.code());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
@ -343,8 +341,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
Label table_start;
__ bind(&table_start);
Label needs_frame_not_call;
Label needs_frame_is_call;
Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
@ -357,11 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
}
if (deopt_jump_table_[i].needs_frame) {
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
if (type == Deoptimizer::LAZY) {
if (needs_frame_is_call.is_bound()) {
__ b(&needs_frame_is_call);
if (needs_frame.is_bound()) {
__ b(&needs_frame);
} else {
__ bind(&needs_frame_is_call);
__ bind(&needs_frame);
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
@ -374,28 +370,8 @@ bool LCodeGen::GenerateDeoptJumpTable() {
__ mov(pc, ip);
}
} else {
if (needs_frame_not_call.is_bound()) {
__ b(&needs_frame_not_call);
} else {
__ bind(&needs_frame_not_call);
__ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
__ push(scratch0());
__ add(fp, sp, Operand(2 * kPointerSize));
__ mov(pc, ip);
}
}
} else {
if (type == Deoptimizer::LAZY) {
__ mov(lr, Operand(pc), LeaveCC, al);
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
} else {
__ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
}
masm()->CheckConstPool(false, false);
}
@ -794,7 +770,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_deopt_every_n_times == 1 &&
!info()->IsStub() &&
info()->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
ASSERT(frame_is_built_);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@ -803,13 +780,8 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
ASSERT(info()->IsStub() || frame_is_built_);
bool needs_lazy_deopt = info()->IsStub();
if (cc == al && frame_is_built_) {
if (needs_lazy_deopt) {
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
}
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
@ -1069,11 +1041,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@ -2130,12 +2097,12 @@ int LCodeGen::GetNextEmittedBlock() const {
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int right_block = instr->FalseDestination(chunk_);
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
if (right_block == left_block) {
if (right_block == left_block || cc == al) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
@ -2153,6 +2120,25 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
}
void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsSmiOrInteger32() || r.IsDouble()) {
EmitBranch(instr, al);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsTaggedNumber()) {
EmitBranch(instr, al);
}
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
__ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
__ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
EmitBranch(instr, eq);
}
}
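// A minimal sketch, using simplified heap accessors, of the predicate the
// branch above evaluates: a value "is a number" when it is either a Smi or a
// heap object carrying the heap-number map. The helper name is illustrative.
static bool IsNumberValue(Object* value, Heap* heap) {
  if (value->IsSmi()) return true;  // unboxed small integer
  // Otherwise it must be a boxed double, i.e. carry the heap-number map.
  return HeapObject::cast(value)->map() == heap->heap_number_map();
}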
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32() || r.IsSmi()) {
@ -2329,7 +2315,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Condition cond = TokenToCondition(instr->op(), false);
@ -2937,6 +2923,19 @@ void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
}
void LCodeGen::DoLinkObjectInList(LLinkObjectInList* instr) {
Register object = ToRegister(instr->object());
ExternalReference sites_list_address = instr->GetReference(isolate());
__ mov(ip, Operand(sites_list_address));
__ ldr(ip, MemOperand(ip));
__ str(ip, FieldMemOperand(object,
instr->hydrogen()->store_field().offset()));
__ mov(ip, Operand(sites_list_address));
__ str(object, MemOperand(ip));
}
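// A rough sketch of what the two stores above amount to, using a hypothetical
// plain-pointer list instead of the real external reference and field offset:
// the object is pushed onto the front of a global singly linked list.
struct LinkedObject { LinkedObject* next_in_list; };
static void Prepend(LinkedObject** list_head, LinkedObject* object) {
  object->next_in_list = *list_head;  // first str: old head into the field
  *list_head = object;                // second str: object becomes the head
}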
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@ -4123,7 +4122,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
__ mov(r2, Operand(instr->hydrogen()->property_cell()));
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE)
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
@ -4527,7 +4526,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
__ TestJSArrayForAllocationSiteInfo(object, temp);
__ TestJSArrayForAllocationMemento(object, temp);
DeoptimizeIf(eq, instr->environment());
}
@ -4535,7 +4534,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
StringAddStub stub(instr->hydrogen()->flags());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@ -5321,80 +5320,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
class DeferredAllocateObject: public LDeferredCode {
public:
DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocateObject* instr_;
};
DeferredAllocateObject* deferred =
new(zone()) DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->temp());
Register scratch2 = ToRegister(instr->temp2());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
int instance_size = initial_map->instance_size();
ASSERT(initial_map->pre_allocated_property_fields() +
initial_map->unused_property_fields() -
initial_map->inobject_properties() == 0);
__ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
Label is_in_new_space;
__ JumpIfInNewSpace(result, scratch, &is_in_new_space);
__ Abort("Allocated object is not in new-space");
__ bind(&is_in_new_space);
}
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
__ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
// Initialize map and fields of the newly allocated object.
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
__ str(map, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
__ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
if (initial_map->inobject_properties() != 0) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < initial_map->inobject_properties(); i++) {
int property_offset = JSObject::kHeaderSize + i * kPointerSize;
__ str(scratch, FieldMemOperand(result, property_offset));
}
}
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map();
int instance_size = initial_map->instance_size();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ mov(result, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ mov(r0, Operand(Smi::FromInt(instance_size)));
__ push(r0);
CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
__ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate: public LDeferredCode {
public:
@ -5712,33 +5637,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
Register strict = scratch0();
__ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoIn(LIn* instr) {
Register obj = ToRegister(instr->object());
Register key = ToRegister(instr->key());
__ Push(key, obj);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);

1
deps/v8/src/arm/lithium-codegen-arm.h

@ -150,7 +150,6 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);

5
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -219,7 +219,6 @@ void LGapResolver::EmitMove(int index) {
ASSERT(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
@ -255,6 +254,10 @@ void LGapResolver::EmitMove(int index) {
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
}
} else if (source->IsDoubleRegister()) {
DwVfpRegister result = cgen_->ToDoubleRegister(destination);
double v = cgen_->ToDouble(constant_source);
__ Vmov(result, v, ip);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.

28
deps/v8/src/arm/macro-assembler-arm.cc

@ -1033,6 +1033,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
if (use_eabi_hardfloat()) {
Move(dst, d0);
@ -3092,11 +3093,14 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
void MacroAssembler::JumpIfNotUniqueName(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(((SYMBOL_TYPE - 1) & kIsInternalizedMask) == kInternalizedTag);
cmp(reg, Operand(kInternalizedTag));
b(lt, not_unique_name);
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
b(eq, &succeed);
cmp(reg, Operand(SYMBOL_TYPE));
b(gt, not_unique_name);
b(ne, not_unique_name);
bind(&succeed);
}
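// A rough sketch of the predicate the revised sequence implements, assuming
// the instance-type encoding asserted above (kInternalizedTag == 0 and
// kStringTag == 0): a unique name is an internalized string or a Symbol.
static bool IsUniqueNameInstanceType(uint32_t instance_type) {
  if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0) {
    return true;  // internalized string: both "not" bits clear
  }
  return instance_type == SYMBOL_TYPE;  // anything else must be a symbol
}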
@ -3746,26 +3750,26 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
}
void MacroAssembler::TestJSArrayForAllocationSiteInfo(
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg) {
Label no_info_available;
Label no_memento_available;
ExternalReference new_space_start =
ExternalReference::new_space_start(isolate());
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
add(scratch_reg, receiver_reg,
Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
cmp(scratch_reg, Operand(new_space_start));
b(lt, &no_info_available);
b(lt, &no_memento_available);
mov(ip, Operand(new_space_allocation_top));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
b(gt, &no_info_available);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
b(gt, &no_memento_available);
ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
cmp(scratch_reg,
Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
bind(&no_info_available);
Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
bind(&no_memento_available);
}
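// A rough sketch of the renamed check, using flat addresses and ignoring the
// heap-object tag handled by the assembler above: the candidate memento sits
// right after the JSArray and is only trusted when it lies inside the
// already-allocated part of new space and its map word is the memento map.
static bool JSArrayHasAllocationMemento(uintptr_t array, uintptr_t space_start,
                                        uintptr_t space_top,
                                        uintptr_t memento_map) {
  uintptr_t memento_end = array + JSArray::kSize + AllocationMemento::kSize;
  if (memento_end < space_start || memento_end > space_top) return false;
  uintptr_t map_word =
      *reinterpret_cast<uintptr_t*>(memento_end - AllocationMemento::kSize);
  return map_word == memento_map;  // same comparison the eq flag reports
}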

6
deps/v8/src/arm/macro-assembler-arm.h

@ -1334,13 +1334,13 @@ class MacroAssembler: public Assembler {
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
// AllocationSiteInfo support. Arrays may have an associated
// AllocationSiteInfo object that can be checked for in order to pretransition
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
// to another type.
// On entry, receiver_reg should point to the array object.
// scratch_reg gets clobbered.
// If allocation info is present, condition flags are set to eq
void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
void TestJSArrayForAllocationMemento(Register receiver_reg,
Register scratch_reg);
private:

347
deps/v8/src/arm/simulator-arm.cc

@ -919,6 +919,54 @@ void Simulator::set_dw_register(int dreg, const int* dbl) {
}
void Simulator::get_d_register(int dreg, uint64_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
}
void Simulator::set_d_register(int dreg, const uint64_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
}
void Simulator::get_d_register(int dreg, uint32_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
}
void Simulator::set_d_register(int dreg, const uint32_t* value) {
ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
}
void Simulator::get_q_register(int qreg, uint64_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
}
void Simulator::set_q_register(int qreg, const uint64_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
}
void Simulator::get_q_register(int qreg, uint32_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
}
void Simulator::set_q_register(int qreg, const uint32_t* value) {
ASSERT((qreg >= 0) && (qreg < num_q_registers));
memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
}
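// Illustrative use of the new accessors, mirroring the NEON vmovl handling
// added to DecodeSpecialCondition below; the helper name and the fixed d0/q0
// registers are assumptions for the sketch (simulator builds only).
static void WidenD0IntoQ0(Simulator* sim) {
  uint8_t lanes[8];
  sim->get_d_register(0, reinterpret_cast<uint64_t*>(lanes));    // read d0
  uint16_t widened[8];
  for (int i = 0; i < 8; i++) widened[i] = lanes[i];             // zero-extend
  sim->set_q_register(0, reinterpret_cast<uint64_t*>(widened));  // write q0
}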
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
@ -1026,6 +1074,7 @@ void Simulator::TrashCallerSaveRegisters() {
registers_[12] = 0x50Bad4U;
}
// Some Operating Systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
@ -1485,7 +1534,7 @@ static int count_bits(int bit_vector) {
}
void Simulator::ProcessPUW(Instruction* instr,
int32_t Simulator::ProcessPU(Instruction* instr,
int num_regs,
int reg_size,
intptr_t* start_address,
@ -1520,11 +1569,10 @@ void Simulator::ProcessPUW(Instruction* instr,
break;
}
}
if (instr->HasW()) {
set_register(rn, rn_val);
}
return rn_val;
}
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
int rlist = instr->RlistValue();
@ -1532,7 +1580,8 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
intptr_t start_address = 0;
intptr_t end_address = 0;
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
int32_t rn_val =
ProcessPU(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
// Catch null pointers a little earlier.
@ -1551,6 +1600,9 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
rlist >>= 1;
}
ASSERT(end_address == ((intptr_t)address) - 4);
if (instr->HasW()) {
set_register(instr->RnValue(), rn_val);
}
}
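// A rough sketch of the caller-side pattern this refactor introduces
// (HandleRList above and HandleVList below both follow it): ProcessPU only
// computes the transfer range and returns the final base value, e.g.
//   intptr_t start = 0, end = 0;
//   int32_t rn_val = ProcessPU(instr, num_regs, size, &start, &end);
//   ... perform the loads/stores between start and end ...
//   if (instr->HasW()) set_register(instr->RnValue(), rn_val);
// so the base-register writeback now happens only after the block transfer.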
@ -1573,7 +1625,8 @@ void Simulator::HandleVList(Instruction* instr) {
intptr_t start_address = 0;
intptr_t end_address = 0;
ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
int32_t rn_val =
ProcessPU(instr, num_regs, operand_size, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
for (int reg = vd; reg < vd + num_regs; reg++) {
@ -1606,6 +1659,9 @@ void Simulator::HandleVList(Instruction* instr) {
}
}
ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
if (instr->HasW()) {
set_register(instr->RnValue(), rn_val);
}
}
@ -1954,6 +2010,7 @@ double Simulator::canonicalizeNaN(double value) {
FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
}
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
@ -2596,10 +2653,43 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
}
case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) { // USAT.
if (instr->Bit(4) == 0) {
// Memop.
} else {
if (instr->Bit(5) == 0) {
switch (instr->Bits(22, 21)) {
case 0:
if (instr->Bit(20) == 0) {
if (instr->Bit(6) == 0) {
// Pkhbt.
uint32_t rn_val = get_register(rn);
uint32_t rm_val = get_register(instr->RmValue());
int32_t shift = instr->Bits(11, 7);
rm_val <<= shift;
set_register(rd, (rn_val & 0xFFFF) | (rm_val & 0xFFFF0000U));
} else {
// Pkhtb.
uint32_t rn_val = get_register(rn);
int32_t rm_val = get_register(instr->RmValue());
int32_t shift = instr->Bits(11, 7);
if (shift == 0) {
shift = 32;
}
rm_val >>= shift;
set_register(rd, (rn_val & 0xFFFF0000U) | (rm_val & 0xFFFF));
}
} else {
UNIMPLEMENTED();
}
break;
case 1:
UNIMPLEMENTED();
break;
case 2:
UNIMPLEMENTED();
break;
case 3: {
// Usat.
int32_t sat_pos = instr->Bits(20, 16);
int32_t sat_val = (1 << sat_pos) - 1;
int32_t shift = instr->Bits(11, 7);
@ -2619,15 +2709,94 @@ void Simulator::DecodeType3(Instruction* instr) {
rm_val = 0;
}
set_register(rd, rm_val);
} else { // SSAT.
break;
}
}
} else {
switch (instr->Bits(22, 21)) {
case 0:
UNIMPLEMENTED();
break;
case 1:
UNIMPLEMENTED();
break;
case 2:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
if (instr->Bits(19, 16) == 0xF) {
// Uxtb16.
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd,
(rm_val & 0xFF) | (rm_val & 0xFF0000));
} else {
UNIMPLEMENTED();
}
return;
} else {
Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
UNIMPLEMENTED();
}
break;
case 3:
if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
if (instr->Bits(19, 16) == 0xF) {
// Uxtb.
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, (rm_val & 0xFF));
} else {
// Uxtab.
uint32_t rn_val = get_register(rn);
uint32_t rm_val = get_register(instr->RmValue());
int32_t rotate = instr->Bits(11, 10);
switch (rotate) {
case 0:
break;
case 1:
rm_val = (rm_val >> 8) | (rm_val << 24);
break;
case 2:
rm_val = (rm_val >> 16) | (rm_val << 16);
break;
case 3:
rm_val = (rm_val >> 24) | (rm_val << 8);
break;
}
set_register(rd, rn_val + (rm_val & 0xFF));
}
} else {
UNIMPLEMENTED();
}
break;
}
}
return;
}
break;
}
case db_x: {
if (FLAG_enable_sudiv) {
@ -3349,6 +3518,156 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
void Simulator::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl signed
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
int esize = 8 * imm3;
int elements = 64 / esize;
int8_t from[8];
get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
int16_t to[8];
int e = 0;
while (e < elements) {
to[e] = from[e];
e++;
}
set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
} else {
UNIMPLEMENTED();
}
break;
case 7:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
// vmovl unsigned
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
if ((imm3 != 1) && (imm3 != 2) && (imm3 != 4)) UNIMPLEMENTED();
int esize = 8 * imm3;
int elements = 64 / esize;
uint8_t from[8];
get_d_register(Vm, reinterpret_cast<uint64_t*>(from));
uint16_t to[8];
int e = 0;
while (e < elements) {
to[e] = from[e];
e++;
}
set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
} else {
UNIMPLEMENTED();
}
break;
case 8:
if (instr->Bits(21, 20) == 0) {
// vst1
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Rn = instr->VnValue();
int type = instr->Bits(11, 8);
int Rm = instr->VmValue();
int32_t address = get_register(Rn);
int regs = 0;
switch (type) {
case nlt_1:
regs = 1;
break;
case nlt_2:
regs = 2;
break;
case nlt_3:
regs = 3;
break;
case nlt_4:
regs = 4;
break;
default:
UNIMPLEMENTED();
break;
}
int r = 0;
while (r < regs) {
uint32_t data[2];
get_d_register(Vd + r, data);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
address += 8;
r++;
}
if (Rm != 15) {
if (Rm == 13) {
set_register(Rn, address);
} else {
set_register(Rn, get_register(Rn) + get_register(Rm));
}
}
} else if (instr->Bits(21, 20) == 2) {
// vld1
int Vd = (instr->Bit(22) << 4) | instr->VdValue();
int Rn = instr->VnValue();
int type = instr->Bits(11, 8);
int Rm = instr->VmValue();
int32_t address = get_register(Rn);
int regs = 0;
switch (type) {
case nlt_1:
regs = 1;
break;
case nlt_2:
regs = 2;
break;
case nlt_3:
regs = 3;
break;
case nlt_4:
regs = 4;
break;
default:
UNIMPLEMENTED();
break;
}
int r = 0;
while (r < regs) {
uint32_t data[2];
data[0] = ReadW(address, instr);
data[1] = ReadW(address + 4, instr);
set_d_register(Vd + r, data);
address += 8;
r++;
}
if (Rm != 15) {
if (Rm == 13) {
set_register(Rn, address);
} else {
set_register(Rn, get_register(Rn) + get_register(Rm));
}
}
} else {
UNIMPLEMENTED();
}
break;
case 0xA:
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
// pld: ignore instruction.
} else {
UNIMPLEMENTED();
}
break;
default:
UNIMPLEMENTED();
break;
}
}
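// A worked example for the "vmovl signed" path above, assuming imm3 == 1
// (esize 8, eight lanes): each signed byte lane of Dm is sign-extended into a
// halfword lane of Qd.
//   Dm bytes:     01 FF 7F 80 00 02 FE 10
//   Qd halfwords: 0001 FFFF 007F FF80 0000 0002 FFFE 0010
// The unsigned variant (SpecialValue 7) zero-extends instead, so FF -> 00FF.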
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@ -3365,7 +3684,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
}
if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
DecodeSpecialCondition(instr);
} else if (ConditionallyExecute(instr)) {
switch (instr->TypeValue()) {
case 0:

17
deps/v8/src/arm/simulator-arm.h

@ -144,7 +144,10 @@ class Simulator {
d8, d9, d10, d11, d12, d13, d14, d15,
d16, d17, d18, d19, d20, d21, d22, d23,
d24, d25, d26, d27, d28, d29, d30, d31,
num_d_registers = 32
num_d_registers = 32,
q0 = 0, q1, q2, q3, q4, q5, q6, q7,
q8, q9, q10, q11, q12, q13, q14, q15,
num_q_registers = 16
};
explicit Simulator(Isolate* isolate);
@ -163,6 +166,15 @@ class Simulator {
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
void get_d_register(int dreg, uint64_t* value);
void set_d_register(int dreg, const uint64_t* value);
void get_d_register(int dreg, uint32_t* value);
void set_d_register(int dreg, const uint32_t* value);
void get_q_register(int qreg, uint64_t* value);
void set_q_register(int qreg, const uint64_t* value);
void get_q_register(int qreg, uint32_t* value);
void set_q_register(int qreg, const uint32_t* value);
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
@ -279,7 +291,7 @@ class Simulator {
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
void ProcessPUW(Instruction* instr,
int32_t ProcessPU(Instruction* instr,
int num_regs,
int operand_size,
intptr_t* start_address,
@ -328,6 +340,7 @@ class Simulator {
// Support for VFP.
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeSpecialCondition(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);

165
deps/v8/src/arm/stub-cache-arm.cc

@ -437,91 +437,58 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
}
void BaseStoreStubCompiler::GenerateNegativeHolderLookup(
MacroAssembler* masm,
Handle<JSObject> holder,
Register holder_reg,
Handle<Name> name,
Label* miss) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
masm, Handle<GlobalObject>::cast(holder), name, scratch1(), miss);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss, holder_reg, name, scratch1(), scratch2());
}
}
// Generate StoreTransition code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name,
Register receiver_reg,
Register name_reg,
Register storage_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss_label,
Label* miss_restore_name,
Label* slow) {
// r0 : value
Label exit;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
int descriptor = transition->LastAdded();
DescriptorArray* descriptors = transition->instance_descriptors();
PropertyDetails details = descriptors->GetDetails(descriptor);
Representation representation = details.representation();
ASSERT(!representation.IsNone());
// Ensure no transitions to deprecated maps are followed.
__ CheckMapDeprecated(transition, scratch1, miss_label);
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
holder = lookup->holder();
} else {
// Find the top object.
holder = *object;
do {
holder = JSObject::cast(holder->GetPrototype());
} while (holder->GetPrototype()->IsJSObject());
}
Register holder_reg = CheckPrototypes(
object, receiver_reg, Handle<JSObject>(holder), name_reg,
scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER);
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
if (lookup->holder() == *object) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
masm,
Handle<GlobalObject>(GlobalObject::cast(holder)),
name,
scratch1,
miss_restore_name);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
}
}
}
Register storage_reg = name_reg;
if (details.type() == CONSTANT_FUNCTION) {
Handle<HeapObject> constant(
HeapObject::cast(descriptors->GetValue(descriptor)));
__ LoadHeapObject(scratch1, constant);
__ cmp(value_reg, scratch1);
__ b(ne, miss_restore_name);
__ b(ne, miss_label);
} else if (FLAG_track_fields && representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_restore_name);
__ JumpIfNotSmi(value_reg, miss_label);
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_restore_name);
__ JumpIfSmi(value_reg, miss_label);
} else if (FLAG_track_double_fields && representation.IsDouble()) {
Label do_store, heap_number;
__ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
@ -535,7 +502,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ bind(&heap_number);
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
miss_restore_name, DONT_DO_SMI_CHECK);
miss_label, DONT_DO_SMI_CHECK);
__ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ bind(&do_store);
@ -566,8 +533,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
__ mov(scratch1, Operand(transition));
__ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
// Update the write barrier for the map field.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
@ -604,19 +570,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(receiver_reg,
offset,
name_reg,
storage_reg,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@ -636,19 +596,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
}
if (!FLAG_track_fields || !representation.IsSmi()) {
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
if (!FLAG_track_double_fields || !representation.IsDouble()) {
__ mov(name_reg, value_reg);
} else {
ASSERT(storage_reg.is(name_reg));
__ mov(storage_reg, value_reg);
}
__ RecordWriteField(scratch1,
offset,
name_reg,
storage_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
@ -668,7 +622,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Register receiver_reg,
@ -680,15 +634,6 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// r0 : value
Label exit;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
@ -1240,6 +1185,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
int save_at_depth,
Label* miss,
PrototypeCheckType check) {
// Make sure that the type feedback oracle harvests the receiver map.
// TODO(svenpanne) Remove this hack when all ICs are reworked.
__ mov(scratch1, Operand(Handle<Map>(object->map())));
Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@ -1342,7 +1291,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ b(success);
@ -1352,6 +1302,17 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
}
void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name,
Label* success,
Label* miss) {
if (!miss->is_unused()) {
__ b(success);
GenerateRestoreName(masm(), miss, name);
TailCallBuiltin(masm(), MissBuiltin(kind()));
}
}
Register BaseLoadStubCompiler::CallbackHandlerFrontend(
Handle<JSObject> object,
Register object_reg,
@ -1394,7 +1355,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
__ b(ne, &miss);
}
HandlerFrontendFooter(success, &miss);
HandlerFrontendFooter(name, success, &miss);
return reg;
}
@ -1415,7 +1376,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
HandlerFrontendFooter(success, &miss);
HandlerFrontendFooter(name, success, &miss);
}
@ -1728,11 +1689,11 @@ Handle<Code> CallStubCompiler::CompileArrayCodeCall(
GenerateLoadFunctionFromCell(cell, function, &miss);
}
Handle<Smi> kind(Smi::FromInt(GetInitialFastElementsKind()), isolate());
Handle<Cell> kind_feedback_cell =
isolate()->factory()->NewCell(kind);
Handle<AllocationSite> site = isolate()->factory()->NewAllocationSite();
site->set_transition_info(Smi::FromInt(GetInitialFastElementsKind()));
Handle<Cell> site_feedback_cell = isolate()->factory()->NewCell(site);
__ mov(r0, Operand(argc));
__ mov(r2, Operand(kind_feedback_cell));
__ mov(r2, Operand(site_feedback_cell));
__ mov(r1, Operand(function));
ArrayConstructorStub stub(isolate());
@ -2824,34 +2785,30 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<Name> name,
Handle<JSObject> object,
Handle<JSObject> holder,
Handle<Name> name,
Handle<ExecutableAccessorInfo> callback) {
Label miss;
// Check that the maps haven't changed.
__ JumpIfSmi(receiver(), &miss);
CheckPrototypes(object, receiver(), holder,
scratch1(), scratch2(), scratch3(), name, &miss);
Label success;
HandlerFrontend(object, receiver(), holder, name, &success);
__ bind(&success);
// Stub never generated for non-global objects that require access checks.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
__ push(receiver()); // receiver
__ mov(ip, Operand(callback)); // callback info
__ Push(ip, this->name(), value());
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
return GetICCode(kind(), Code::CALLBACKS, name);
return GetCode(kind(), Code::CALLBACKS, name);
}
@ -3105,7 +3062,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
HandlerFrontendFooter(&success, &miss);
HandlerFrontendFooter(name, &success, &miss);
__ bind(&success);
Counters* counters = isolate()->counters();
@ -3118,7 +3075,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
}
Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
CodeHandleList* handlers,
Handle<Name> name,

127
deps/v8/src/array-iterator.js

@ -0,0 +1,127 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// 'AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'use strict';
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $Array = global.Array;
var ARRAY_ITERATOR_KIND_KEYS = 1;
var ARRAY_ITERATOR_KIND_VALUES = 2;
var ARRAY_ITERATOR_KIND_ENTRIES = 3;
// The spec draft also has "sparse" but it is never used.
var iteratorObjectSymbol = %CreateSymbol(void 0);
var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0);
var arrayIterationKindSymbol = %CreateSymbol(void 0);
function ArrayIterator() {}
// 15.4.5.1 CreateArrayIterator Abstract Operation
function CreateArrayIterator(array, kind) {
var object = ToObject(array);
var iterator = new ArrayIterator;
iterator[iteratorObjectSymbol] = object;
iterator[arrayIteratorNextIndexSymbol] = 0;
iterator[arrayIterationKindSymbol] = kind;
return iterator;
}
// 15.19.4.3.4 CreateItrResultObject
function CreateIteratorResultObject(value, done) {
return {value: value, done: done};
}
// 15.4.5.2.2 ArrayIterator.prototype.next( )
function ArrayIteratorNext() {
var iterator = ToObject(this);
var array = iterator[iteratorObjectSymbol];
if (!array) {
throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']);
}
var index = iterator[arrayIteratorNextIndexSymbol];
var itemKind = iterator[arrayIterationKindSymbol];
var length = TO_UINT32(array.length);
// "sparse" is never used.
if (index >= length) {
iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity
return CreateIteratorResultObject(void 0, true);
}
var elementKey = ToString(index);
iterator[arrayIteratorNextIndexSymbol] = index + 1;
if (itemKind == ARRAY_ITERATOR_KIND_VALUES)
return CreateIteratorResultObject(array[elementKey], false);
if (itemKind == ARRAY_ITERATOR_KIND_ENTRIES)
return CreateIteratorResultObject([elementKey, array[elementKey]], false);
return CreateIteratorResultObject(elementKey, false);
}
function ArrayEntries() {
return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_ENTRIES);
}
function ArrayValues() {
return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_VALUES);
}
function ArrayKeys() {
return CreateArrayIterator(this, ARRAY_ITERATOR_KIND_KEYS);
}
function SetUpArrayIterator() {
%CheckIsBootstrapping();
%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
%FunctionSetReadOnlyPrototype(ArrayIterator);
InstallFunctions(ArrayIterator.prototype, DONT_ENUM, $Array(
'next', ArrayIteratorNext
));
}
SetUpArrayIterator();
function ExtendArrayPrototype() {
%CheckIsBootstrapping();
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
'entries', ArrayEntries,
'values', ArrayValues,
'keys', ArrayKeys
));
}
ExtendArrayPrototype();
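// A usage sketch of the iterators installed above (available only when this
// experimental native is compiled in under --harmony-iteration):
//   var it = [10, 20].values();
//   it.next();  // {value: 10, done: false}
//   it.next();  // {value: 20, done: false}
//   it.next();  // {value: undefined, done: true}
// entries() yields [key, element] pairs and keys() yields the element keys;
// in this draft both use the string form of the index ('0', '1', ...).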

17
deps/v8/src/assembler.cc

@ -381,6 +381,7 @@ void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
}
}
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
for (int i = 0; i < kIntSize; i++) {
@ -390,6 +391,7 @@ void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
}
}
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
@ -847,7 +849,7 @@ void RelocInfo::Verify() {
CHECK(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = HEAP->FindCodeObject(addr);
Object* found = code->GetIsolate()->FindCodeObject(addr);
CHECK(found->IsCode());
CHECK(code->address() == HeapObject::cast(found)->address());
break;
@ -1071,6 +1073,11 @@ ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
}
ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
}
ExternalReference ExternalReference::transcendental_cache_array_address(
Isolate* isolate) {
return ExternalReference(
@ -1123,6 +1130,12 @@ ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
}
ExternalReference ExternalReference::allocation_sites_list_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->allocation_sites_list_address());
}
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
@ -1322,6 +1335,7 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
return ExternalReference(Redirect(isolate, function));
}
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
@ -1334,6 +1348,7 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
ExternalReference ExternalReference::re_word_character_map() {
return ExternalReference(
NativeRegExpMacroAssembler::word_character_map_address());

5
deps/v8/src/assembler.h

@ -747,6 +747,9 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Heap::roots_array_start()
static ExternalReference roots_array_start(Isolate* isolate);
// Static variable Heap::allocation_sites_list_address()
static ExternalReference allocation_sites_list_address(Isolate* isolate);
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_limit(Isolate* isolate);
@ -863,6 +866,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
static ExternalReference stress_deopt_count(Isolate* isolate);
private:
explicit ExternalReference(void* address)
: address_(address) {}

34
deps/v8/src/ast.cc

@ -71,8 +71,14 @@ bool Expression::IsNullLiteral() {
}
bool Expression::IsUndefinedLiteral() {
return AsLiteral() != NULL && AsLiteral()->value()->IsUndefined();
bool Expression::IsUndefinedLiteral(Isolate* isolate) {
VariableProxy* var_proxy = AsVariableProxy();
if (var_proxy == NULL) return false;
Variable* var = var_proxy->var();
// The global identifier "undefined" is immutable. Everything
// else could be reassigned.
return var != NULL && var->location() == Variable::UNALLOCATED &&
var_proxy->name()->Equals(isolate->heap()->undefined_string());
}
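// For example, in `x === undefined` at top level the proxy resolves to the
// unallocated global `undefined`, which is immutable, so the literal
// comparison can be specialised; inside `function f(undefined) { ... }` the
// name binds to a parameter and the new check conservatively returns false.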
@ -385,12 +391,13 @@ static bool IsVoidOfLiteral(Expression* expr) {
static bool MatchLiteralCompareUndefined(Expression* left,
Token::Value op,
Expression* right,
Expression** expr) {
Expression** expr,
Isolate* isolate) {
if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) {
if (left->IsUndefinedLiteral(isolate) && Token::IsEqualityOp(op)) {
*expr = right;
return true;
}
@ -398,9 +405,10 @@ static bool MatchLiteralCompareUndefined(Expression* left,
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
MatchLiteralCompareUndefined(right_, op_, left_, expr);
bool CompareOperation::IsLiteralCompareUndefined(
Expression** expr, Isolate* isolate) {
return MatchLiteralCompareUndefined(left_, op_, right_, expr, isolate) ||
MatchLiteralCompareUndefined(right_, op_, left_, expr, isolate);
}
@ -503,7 +511,7 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
store_mode_ = oracle->GetStoreMode(id);
} else if (oracle->StoreIsPolymorphic(id)) {
} else if (oracle->StoreIsKeyedPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
store_mode_ = oracle->GetStoreMode(id);
@ -520,9 +528,11 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(
oracle->StoreMonomorphicReceiverType(id), zone);
} else if (oracle->StoreIsPolymorphic(id)) {
} else if (oracle->StoreIsKeyedPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
} else {
oracle->CollectPolymorphicStoreReceiverTypes(id, &receiver_types_);
}
store_mode_ = oracle->GetStoreMode(id);
type_ = oracle->IncrementType(this);
@ -675,8 +685,10 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
Object* value = allocation_info_cell_->value();
if (value->IsSmi()) {
elements_kind_ = static_cast<ElementsKind>(Smi::cast(value)->value());
ASSERT(!value->IsTheHole());
if (value->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(value);
elements_kind_ = site->GetElementsKind();
}
}
}

22
deps/v8/src/ast.h

@ -353,14 +353,12 @@ class Expression: public AstNode {
// True iff the expression is the null literal.
bool IsNullLiteral();
// True iff the expression is the undefined literal.
bool IsUndefinedLiteral();
// True if we can prove that the expression is the undefined literal.
bool IsUndefinedLiteral(Isolate* isolate);
// Expression type bounds
Handle<Type> upper_type() { return upper_type_; }
Handle<Type> lower_type() { return lower_type_; }
void set_upper_type(Handle<Type> type) { upper_type_ = type; }
void set_lower_type(Handle<Type> type) { lower_type_ = type; }
Bounds bounds() { return bounds_; }
void set_bounds(Bounds bounds) { bounds_ = bounds; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
@ -391,15 +389,13 @@ class Expression: public AstNode {
protected:
explicit Expression(Isolate* isolate)
: upper_type_(Type::Any(), isolate),
lower_type_(Type::None(), isolate),
: bounds_(Type::None(), Type::Any(), isolate),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
private:
Handle<Type> upper_type_;
Handle<Type> lower_type_;
Bounds bounds_;
byte to_boolean_types_;
const BailoutId id_;
@ -1884,9 +1880,6 @@ class BinaryOperation: public Expression {
BailoutId RightId() const { return right_id_; }
TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
// TODO(rossberg): result_type should be subsumed by lower_type.
Handle<Type> result_type() const { return result_type_; }
void set_result_type(Handle<Type> type) { result_type_ = type; }
Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
@ -1913,7 +1906,6 @@ class BinaryOperation: public Expression {
Expression* right_;
int pos_;
Handle<Type> result_type_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
// type for the RHS.
Maybe<int> fixed_right_arg_;
@ -2002,7 +1994,7 @@ class CompareOperation: public Expression {
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
bool IsLiteralCompareUndefined(Expression** expr);
bool IsLiteralCompareUndefined(Expression** expr, Isolate* isolate);
bool IsLiteralCompareNull(Expression** expr);
protected:

9
deps/v8/src/atomicops.h

@ -153,14 +153,11 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && \
(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
#include "atomicops_internals_arm_gcc.h"

1
deps/v8/src/atomicops_internals_x86_gcc.cc

@ -124,6 +124,7 @@ class AtomicOpsx86Initializer {
}
};
// A global to get use initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;

1
deps/v8/src/bignum.cc

@ -45,6 +45,7 @@ static int BitSize(S value) {
return 8 * sizeof(value);
}
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
ASSERT(kBigitSize >= BitSize(value));

30
deps/v8/src/bootstrapper.cc

@ -200,7 +200,7 @@ class Genesis BASE_EMBEDDED {
// detached from the other objects in the snapshot.
void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
// New context initialization. Used for creating a context from scratch.
bool InitializeGlobal(Handle<GlobalObject> inner_global,
void InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function);
void InitializeExperimentalGlobal();
// Installs the contents of the native .js files on the global objects.
@ -829,7 +829,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
// This is only called if we are not using snapshots. The equivalent
// work in the snapshot case is done in HookUpInnerGlobal.
bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<JSFunction> empty_function) {
// --- G l o b a l C o n t e x t ---
// Use the empty function as closure (no scope info).
@ -1053,10 +1053,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
{ MaybeObject* result = cons->SetInstancePrototype(
native_context()->initial_object_prototype());
if (result->IsFailure()) return false;
}
JSFunction::SetInstancePrototype(cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
@ -1277,7 +1275,6 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
native_context()->set_random_seed(*zeroed_byte_array);
memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
}
return true;
}
@ -1289,7 +1286,7 @@ Handle<JSFunction> Genesis::InstallTypedArray(
Builtins::kIllegal, false, true);
Handle<Map> initial_map = isolate()->factory()->NewMap(
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, elementsKind);
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields, elementsKind);
result->set_initial_map(*initial_map);
initial_map->set_constructor(*result);
return result;
@ -1327,6 +1324,11 @@ void Genesis::InitializeExperimentalGlobal() {
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
{ // -- W e a k S e t
InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
}
}
if (FLAG_harmony_array_buffer) {
@ -1373,7 +1375,7 @@ void Genesis::InitializeExperimentalGlobal() {
Handle<JSFunction> data_view_fun =
InstallFunction(
global, "DataView", JS_DATA_VIEW_TYPE,
JSDataView::kSize,
JSDataView::kSizeWithInternalFields,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_data_view_fun(*data_view_fun);
@ -1586,6 +1588,7 @@ void Genesis::InstallNativeFunctions() {
to_complete_property_descriptor);
}
void Genesis::InstallExperimentalNativeFunctions() {
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
@ -2071,6 +2074,11 @@ bool Genesis::InstallExperimentalNatives() {
"native generator.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_iteration &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native array-iterator.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
InstallExperimentalNativeFunctions();
@ -2239,10 +2247,12 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
#endif
}
static uint32_t Hash(RegisteredExtension* extension) {
return v8::internal::ComputePointerHash(extension);
}
static bool MatchRegisteredExtensions(void* key1, void* key2) {
return key1 == key2;
}
@ -2624,7 +2634,7 @@ Genesis::Genesis(Isolate* isolate,
Handle<JSGlobalProxy> global_proxy =
CreateNewGlobals(global_template, global_object, &inner_global);
HookUpGlobalProxy(inner_global, global_proxy);
if (!InitializeGlobal(inner_global, empty_function)) return;
InitializeGlobal(inner_global, empty_function);
InstallJSFunctionResultCaches();
InitializeNormalizedMapCaches();
if (!InstallNatives()) return;

18
deps/v8/src/builtins.cc

@ -182,6 +182,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
return result;
}
// ----------------------------------------------------------------------------
BUILTIN(Illegal) {
@ -210,14 +211,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
MaybeObject* maybe_array = array->Initialize(0);
if (maybe_array->IsFailure()) return maybe_array;
AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
ElementsKind to_kind = array->GetElementsKind();
if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
AllocationMemento* memento = AllocationMemento::FindForJSObject(array);
if (memento != NULL && memento->IsValid()) {
AllocationSite* site = memento->GetAllocationSite();
ElementsKind to_kind = site->GetElementsKind();
if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
to_kind)) {
// We have advice that we should change the elements kind
if (FLAG_trace_track_allocation_sites) {
PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n",
reinterpret_cast<void*>(array),
ElementsKindToString(array->GetElementsKind()),
ElementsKindToString(to_kind));
@ -1153,6 +1155,7 @@ BUILTIN(StrictModePoisonPill) {
"strict_poison_pill", HandleVector<Object>(NULL, 0)));
}
// -----------------------------------------------------------------------------
//
@ -1432,14 +1435,17 @@ static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
KeyedLoadIC::GenerateIndexedInterceptor(masm);
}
static void Generate_KeyedLoadIC_NonStrictArguments(MacroAssembler* masm) {
KeyedLoadIC::GenerateNonStrictArguments(masm);
}
static void Generate_StoreIC_Slow(MacroAssembler* masm) {
StoreIC::GenerateSlow(masm);
}
@ -1539,14 +1545,17 @@ static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateInitialize(masm);
}
static void Generate_KeyedStoreIC_NonStrictArguments(MacroAssembler* masm) {
KeyedStoreIC::GenerateNonStrictArguments(masm);
}
static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
}
static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
}
@ -1716,6 +1725,7 @@ void Builtins::InitBuiltinFunctionTable() {
#undef DEF_FUNCTION_PTR_A
}
void Builtins::SetUp(bool create_heap_objects) {
ASSERT(!initialized_);
Isolate* isolate = Isolate::Current();

12
deps/v8/src/char-predicates-inl.h

@ -71,6 +71,18 @@ inline bool IsHexDigit(uc32 c) {
}
inline bool IsOctalDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return IsInRange(c, '0', '7');
}
inline bool IsBinaryDigit(uc32 c) {
// ECMA-262, 6th, 7.8.3
return c == '0' || c == '1';
}
inline bool IsRegExpWord(uc16 c) {
return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)

2
deps/v8/src/char-predicates.h

@ -40,6 +40,8 @@ inline bool IsCarriageReturn(uc32 c);
inline bool IsLineFeed(uc32 c);
inline bool IsDecimalDigit(uc32 c);
inline bool IsHexDigit(uc32 c);
inline bool IsOctalDigit(uc32 c);
inline bool IsBinaryDigit(uc32 c);
inline bool IsRegExpWord(uc32 c);
inline bool IsRegExpNewline(uc32 c);
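(Illustrative aside, not part of the diff: the new IsOctalDigit/IsBinaryDigit predicates above back the ES6 "0o"/"0b" literal prefixes. A minimal standalone sketch of the same range checks in plain C++, with hypothetical names; V8's versions take uc32 and go through IsInRange.)

#include <cassert>

// Standalone equivalents of the predicates added above; the real ones
// operate on uc32 and reuse IsInRange().
static bool IsOctalDigitSketch(int c)  { return c >= '0' && c <= '7'; }
static bool IsBinaryDigitSketch(int c) { return c == '0' || c == '1'; }

int main() {
  assert(IsOctalDigitSketch('7') && !IsOctalDigitSketch('8'));
  assert(IsBinaryDigitSketch('1') && !IsBinaryDigitSketch('2'));
  return 0;
}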

6
deps/v8/src/checks.h

@ -230,6 +230,11 @@ inline void CheckNonEqualsHelper(const char* file,
#define CHECK_LE(a, b) CHECK((a) <= (b))
// Use C++11 static_assert if possible, which gives error
// messages that are easier to understand on first sight.
#if __cplusplus >= 201103L
#define STATIC_CHECK(test) static_assert(test, #test)
#else
// This is inspired by the static assertion facility in boost. This
// is pretty magical. If it causes you trouble on a platform you may
// find a fix in the boost code.
@ -249,6 +254,7 @@ template <int> class StaticAssertionHelper { };
typedef \
StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
#endif
extern bool FLAG_enable_slow_asserts;
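(Illustrative aside, not part of the diff: the checks.h hunk above selects C++11 static_assert when __cplusplus >= 201103L and otherwise keeps the sizeof-based fallback. A minimal sketch of that dispatch with hypothetical names; the __LINE__-joining of the real macro is omitted, so this version is meant for a single use per scope.)

template <bool> struct StaticAssertionSketch;
template <> struct StaticAssertionSketch<true> {};

#if __cplusplus >= 201103L
// C++11 path: readable compiler diagnostics.
#define STATIC_CHECK_SKETCH(test) static_assert(test, #test)
#else
// Pre-C++11 path: sizeof of an incomplete type fails to compile when the
// condition is false.
#define STATIC_CHECK_SKETCH(test) \
  typedef char StaticCheckSketchType[sizeof(StaticAssertionSketch<(test)>)]
#endif

STATIC_CHECK_SKETCH(sizeof(int) >= 2);    // compiles
// STATIC_CHECK_SKETCH(sizeof(int) == 1); // would be a compile-time error

int main() { return 0; }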

13
deps/v8/src/circular-queue-inl.h

@ -35,7 +35,16 @@ namespace internal {
void* SamplingCircularQueue::Enqueue() {
WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
if (producer_pos_->enqueue_pos == producer_pos_->next_chunk_pos) {
if (producer_pos_->enqueue_pos == buffer_ + buffer_size_) {
producer_pos_->next_chunk_pos = buffer_;
producer_pos_->enqueue_pos = buffer_;
}
Acquire_Store(producer_pos_->next_chunk_pos, kEnqueueStarted);
// Skip marker.
producer_pos_->enqueue_pos += 1;
producer_pos_->next_chunk_pos += chunk_size_;
}
void* result = producer_pos_->enqueue_pos;
producer_pos_->enqueue_pos += record_size_;
return result;
@ -44,7 +53,7 @@ void* SamplingCircularQueue::Enqueue() {
void SamplingCircularQueue::WrapPositionIfNeeded(
SamplingCircularQueue::Cell** pos) {
if (**pos == kEnd) *pos = buffer_;
if (*pos == buffer_ + buffer_size_) *pos = buffer_;
}

37
deps/v8/src/circular-queue.cc

@ -33,26 +33,22 @@ namespace v8 {
namespace internal {
SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
int desired_chunk_size_in_bytes,
int buffer_size_in_chunks)
SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes,
size_t desired_chunk_size_in_bytes,
unsigned buffer_size_in_chunks)
: record_size_(record_size_in_bytes / sizeof(Cell)),
chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
record_size_in_bytes),
record_size_in_bytes + sizeof(Cell)),
chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
buffer_size_(chunk_size_ * buffer_size_in_chunks),
// The distance ensures that producer and consumer never step on
// each other's chunks and helps eviction of produced data from
// the CPU cache (having that chunk size is bigger than the cache.)
producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
buffer_(NewArray<Cell>(buffer_size_)) {
ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes);
ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_);
ASSERT(buffer_size_in_chunks > 2);
// Clean up the whole buffer to avoid encountering a random kEnd
// while enqueuing.
for (int i = 0; i < buffer_size_; ++i) {
// Mark all chunks as clear.
for (size_t i = 0; i < buffer_size_; i += chunk_size_) {
buffer_[i] = kClear;
}
buffer_[buffer_size_] = kEnd;
// Layout producer and consumer position pointers each on their own
// cache lines to avoid cache lines thrashing due to simultaneous
@ -67,6 +63,7 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
producer_pos_ = reinterpret_cast<ProducerPosition*>(
RoundUp(positions_, kProcessorCacheLineSize));
producer_pos_->next_chunk_pos = buffer_;
producer_pos_->enqueue_pos = buffer_;
consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
@ -74,7 +71,11 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
positions_ + positions_size);
consumer_pos_->dequeue_chunk_pos = buffer_;
consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
// The distance ensures that producer and consumer never step on
// each other's chunks and helps eviction of produced data from
// the CPU cache (having that chunk size is bigger than the cache.)
const size_t producer_consumer_distance = (2 * chunk_size_);
consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance;
consumer_pos_->dequeue_pos = NULL;
}
@ -89,9 +90,11 @@ void* SamplingCircularQueue::StartDequeue() {
if (consumer_pos_->dequeue_pos != NULL) {
return consumer_pos_->dequeue_pos;
} else {
if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) {
consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos;
consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_;
if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) {
// Skip marker.
consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1;
consumer_pos_->dequeue_end_pos =
consumer_pos_->dequeue_chunk_pos + chunk_size_;
return consumer_pos_->dequeue_pos;
} else {
return NULL;

25
deps/v8/src/circular-queue.h

@ -45,9 +45,9 @@ namespace internal {
class SamplingCircularQueue {
public:
// Executed on the application thread.
SamplingCircularQueue(int record_size_in_bytes,
int desired_chunk_size_in_bytes,
int buffer_size_in_chunks);
SamplingCircularQueue(size_t record_size_in_bytes,
size_t desired_chunk_size_in_bytes,
unsigned buffer_size_in_chunks);
~SamplingCircularQueue();
// Enqueue returns a pointer to a memory location for storing the next
@ -67,12 +67,16 @@ class SamplingCircularQueue {
void FlushResidualRecords();
typedef AtomicWord Cell;
// Reserved values for the first cell of a record.
static const Cell kClear = 0; // Marks clean (processed) chunks.
static const Cell kEnd = -1; // Marks the end of the buffer.
private:
// Reserved values for the chunk marker (first Cell in each chunk).
enum {
kClear, // Marks clean (processed) chunks.
kEnqueueStarted // Marks chunks where enqueue started.
};
struct ProducerPosition {
Cell* next_chunk_pos;
Cell* enqueue_pos;
};
struct ConsumerPosition {
@ -84,11 +88,10 @@ class SamplingCircularQueue {
INLINE(void WrapPositionIfNeeded(Cell** pos));
const int record_size_;
const int chunk_size_in_bytes_;
const int chunk_size_;
const int buffer_size_;
const int producer_consumer_distance_;
const size_t record_size_;
const size_t chunk_size_in_bytes_;
const size_t chunk_size_;
const size_t buffer_size_;
Cell* buffer_;
byte* positions_;
ProducerPosition* producer_pos_;
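(Illustrative aside, not part of the diff: the queue rework above drops the kEnd sentinel and instead reserves a marker cell at the start of every chunk, written by the producer via Acquire_Store and polled by the consumer via Acquire_Load, with the consumer kept two chunks behind. A rough, hypothetical sketch of just the marker publish/poll idea using C++11 atomics rather than V8's atomicops; names and sizes are invented and the distance logic is omitted.)

#include <atomic>
#include <cstdio>

struct ChunkSketch {
  std::atomic<int> marker;  // 0 ~ kClear, non-zero ~ chunk handed over
  int payload[4];           // the record cells that follow the marker
};

void Produce(ChunkSketch* c) {
  for (int i = 0; i < 4; ++i) c->payload[i] = i * i;  // fill the chunk
  c->marker.store(1, std::memory_order_release);      // then flip the marker
}

bool TryConsume(const ChunkSketch* c, int out[4]) {
  // Poll the marker first; only read the payload once the marker is set.
  if (c->marker.load(std::memory_order_acquire) == 0) return false;
  for (int i = 0; i < 4; ++i) out[i] = c->payload[i];
  return true;
}

int main() {
  ChunkSketch c;
  c.marker.store(0);
  int out[4];
  std::printf("%d\n", TryConsume(&c, out));  // 0: chunk still clear
  Produce(&c);
  std::printf("%d\n", TryConsume(&c, out));  // 1: chunk available
  return 0;
}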

238
deps/v8/src/code-stubs-hydrogen.cc

@ -315,40 +315,44 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
int length = casted_stub()->length();
HInstruction* boilerplate =
HInstruction* allocation_site =
AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
GetParameter(1),
NULL,
FAST_ELEMENTS));
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, undefined);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site, undefined);
checker.Then();
HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo();
HInstruction* boilerplate = AddLoad(allocation_site, access);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements = AddLoadElements(boilerplate);
IfBuilder if_fixed_cow(this);
if_fixed_cow.IfCompareMap(elements, factory->fixed_cow_array_map());
if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
if_fixed_cow.Then();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
0/*copy-on-write*/));
if_fixed_cow.Else();
IfBuilder if_fixed(this);
if_fixed.IfCompareMap(elements, factory->fixed_array_map());
if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
if_fixed.Then();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
allocation_site,
alloc_site_mode,
FAST_ELEMENTS,
length));
if_fixed.Else();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
allocation_site,
alloc_site_mode,
FAST_DOUBLE_ELEMENTS,
length));
@ -356,6 +360,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
allocation_site,
alloc_site_mode,
elements_kind,
length));
@ -392,7 +397,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
AddInstruction(new(zone) HInstanceSize(boilerplate));
HValue* size_in_words =
AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2));
checker.IfCompare(boilerplate_size, size_in_words, Token::EQ);
checker.If<HCompareNumericAndBranch>(boilerplate_size,
size_in_words, Token::EQ);
checker.Then();
HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size));
@ -420,6 +426,49 @@ Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
Zone* zone = this->zone();
HValue* size = AddInstruction(new(zone) HConstant(AllocationSite::kSize));
HAllocate::Flags flags = HAllocate::DefaultFlags();
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
HInstruction* object = AddInstruction(new(zone)
HAllocate(context(), size, HType::JSObject(), flags));
// Store the map
Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(),
isolate());
AddStoreMapConstant(object, allocation_site_map);
// Store the payload (smi elements kind)
HValue* initial_elements_kind = AddInstruction(new(zone) HConstant(
GetInitialFastElementsKind()));
Add<HStoreNamedField>(object,
HObjectAccess::ForAllocationSiteTransitionInfo(),
initial_elements_kind);
Add<HLinkObjectInList>(object, HObjectAccess::ForAllocationSiteWeakNext(),
HLinkObjectInList::ALLOCATION_SITE_LIST);
// We use a hammer (SkipWriteBarrier()) to indicate that we know the input
// cell is really a Cell, and so no write barrier is needed.
// TODO(mvstanton): Add a debug_code check to verify the input cell is really
// a cell. (perhaps with a new instruction, HAssert).
HInstruction* cell = GetParameter(0);
HObjectAccess access = HObjectAccess::ForCellValue();
HStoreNamedField* store = AddStore(cell, access, object);
store->SkipWriteBarrier();
return cell;
}
Handle<Code> CreateAllocationSiteStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
@ -483,49 +532,15 @@ Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
Zone* zone = this->zone();
HValue* js_array = GetParameter(0);
HValue* map = GetParameter(1);
info()->MarkAsSavesCallerDoubles();
AddInstruction(new(zone) HTrapAllocationMemento(js_array));
HInstruction* array_length =
AddLoad(js_array, HObjectAccess::ForArrayLength());
array_length->set_type(HType::Smi());
ElementsKind to_kind = casted_stub()->to_kind();
BuildNewSpaceArrayCheck(array_length, to_kind);
IfBuilder if_builder(this);
if_builder.IfCompare(array_length, graph()->GetConstant0(), Token::EQ);
if_builder.Then();
// Nothing to do, just change the map.
if_builder.Else();
HInstruction* elements = AddLoadElements(js_array);
HInstruction* elements_length = AddLoadFixedArrayLength(elements);
HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
context(), to_kind, elements_length);
BuildCopyElements(context(), elements,
casted_stub()->from_kind(), new_elements,
to_kind, array_length, elements_length);
AddStore(js_array, HObjectAccess::ForElementsPointer(), new_elements);
if_builder.End();
AddStore(js_array, HObjectAccess::ForMap(), map);
BuildTransitionElementsKind(GetParameter(0),
GetParameter(1),
casted_stub()->from_kind(),
casted_stub()->to_kind(),
true);
return js_array;
return GetParameter(0);
}
@ -545,7 +560,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
}
HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
JSArrayBuilder array_builder(this, kind, property_cell, constructor,
// Walk through the property cell to the AllocationSite
HValue* alloc_site = AddInstruction(new(zone()) HLoadNamedField(property_cell,
HObjectAccess::ForCellValue()));
JSArrayBuilder array_builder(this, kind, alloc_site, constructor,
override_mode);
HValue* result = NULL;
switch (argument_class) {
@ -606,7 +624,8 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
HBoundsCheck* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length);
IfBuilder if_builder(this);
if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
if_builder.If<HCompareNumericAndBranch>(checked_arg, constant_zero,
Token::EQ);
if_builder.Then();
Push(initial_capacity_node); // capacity
Push(constant_zero); // length
@ -742,8 +761,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(isolate->heap()->meta_map());
Handle<Type> type =
CompareNilICStub::StateToType(isolate, stub->GetState(), sentinel_map);
Handle<Type> type = stub->GetType(isolate, sentinel_map);
BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
@ -763,6 +781,45 @@ Handle<Code> CompareNilICStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<UnaryOpStub>::BuildCodeInitializedStub() {
UnaryOpStub* stub = casted_stub();
Handle<Type> type = stub->GetType(graph()->isolate());
HValue* input = GetParameter(0);
// Prevent unwanted HChange being inserted to ensure that the stub
// deopts on newly encountered types.
if (!type->Maybe(Type::Double())) {
input = AddInstruction(new(zone())
HForceRepresentation(input, Representation::Smi()));
}
if (!type->Is(Type::Number())) {
// If we expect to see other things than Numbers, we will create a generic
// stub, which handles all numbers and calls into the runtime for the rest.
IfBuilder if_number(this);
if_number.If<HIsNumberAndBranch>(input);
if_number.Then();
HInstruction* res = BuildUnaryMathOp(input, type, stub->operation());
if_number.Return(AddInstruction(res));
if_number.Else();
HValue* function = AddLoadJSBuiltin(stub->ToJSBuiltin(), context());
Add<HPushArgument>(GetParameter(0));
HValue* result = Add<HInvokeFunction>(context(), function, 1);
if_number.Return(result);
if_number.End();
return graph()->GetConstantUndefined();
}
return AddInstruction(BuildUnaryMathOp(input, type, stub->operation()));
}
Handle<Code> UnaryOpStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
@ -782,4 +839,85 @@ Handle<Code> ToBooleanStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub* stub = casted_stub();
Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
Handle<Object> placeholer_value(Smi::FromInt(0), isolate());
Handle<PropertyCell> placeholder_cell =
isolate()->factory()->NewPropertyCell(placeholer_value);
HParameter* receiver = GetParameter(0);
HParameter* value = GetParameter(2);
if (stub->is_constant()) {
// Assume every store to a constant value changes it.
current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
set_current_block(NULL);
} else {
HValue* cell = Add<HConstant>(placeholder_cell, Representation::Tagged());
// Check that the map of the global has not changed: use a placeholder map
// that will be replaced later with the global object's map.
Handle<Map> placeholder_map = isolate()->factory()->meta_map();
AddInstruction(HCheckMaps::New(receiver, placeholder_map, zone()));
// Load the payload of the global parameter cell. A hole indicates that the
// property has been deleted and that the store must be handled by the
// runtime.
HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
HValue* cell_contents = Add<HLoadNamedField>(cell, access);
IfBuilder builder(this);
HValue* hole_value = Add<HConstant>(hole, Representation::Tagged());
builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
builder.Then();
builder.Deopt();
builder.Else();
Add<HStoreNamedField>(cell, access, value);
builder.End();
}
return value;
}
Handle<Code> StoreGlobalStub::GenerateCode() {
return DoGenerateCode(this);
}
template<>
HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
HValue* value = GetParameter(0);
HValue* map = GetParameter(1);
HValue* key = GetParameter(2);
HValue* object = GetParameter(3);
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
set_current_block(NULL);
} else {
info()->MarkAsSavesCallerDoubles();
BuildTransitionElementsKind(object, map,
casted_stub()->from_kind(),
casted_stub()->to_kind(),
casted_stub()->is_jsarray());
BuildUncheckedMonomorphicElementAccess(object, key, value, NULL,
casted_stub()->is_jsarray(),
casted_stub()->to_kind(),
true, ALLOW_RETURN_HOLE,
casted_stub()->store_mode());
}
return value;
}
Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
return DoGenerateCode(this);
}
} } // namespace v8::internal

200
deps/v8/src/code-stubs.cc

@ -85,6 +85,14 @@ Code::Kind CodeStub::GetCodeKind() const {
}
Handle<Code> CodeStub::GetCodeCopyFromTemplate(Isolate* isolate) {
Handle<Code> ic = GetCode(isolate);
ic = isolate->factory()->CopyCode(ic);
RecordCodeGeneration(*ic, isolate);
return ic;
}
Handle<Code> PlatformCodeStub::GenerateCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
@ -185,11 +193,82 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
}
void CodeStub::PrintName(StringStream* stream) {
void CodeStub::PrintBaseName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
void CodeStub::PrintName(StringStream* stream) {
PrintBaseName(stream);
PrintState(stream);
}
Builtins::JavaScript UnaryOpStub::ToJSBuiltin() {
switch (operation_) {
default:
UNREACHABLE();
case Token::SUB:
return Builtins::UNARY_MINUS;
case Token::BIT_NOT:
return Builtins::BIT_NOT;
}
}
Handle<JSFunction> UnaryOpStub::ToJSFunction(Isolate* isolate) {
Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
Object* builtin = builtins->javascript_builtin(ToJSBuiltin());
return Handle<JSFunction>(JSFunction::cast(builtin), isolate);
}
MaybeObject* UnaryOpStub::Result(Handle<Object> object, Isolate* isolate) {
Handle<JSFunction> builtin_function = ToJSFunction(isolate);
bool caught_exception;
Handle<Object> result = Execution::Call(builtin_function, object,
0, NULL, &caught_exception);
if (caught_exception) {
return Failure::Exception();
}
return *result;
}
void UnaryOpStub::UpdateStatus(Handle<Object> object) {
State old_state(state_);
if (object->IsSmi()) {
state_.Add(SMI);
if (operation_ == Token::SUB && *object == 0) {
// The result (-0) has to be represented as double.
state_.Add(HEAP_NUMBER);
}
} else if (object->IsHeapNumber()) {
state_.Add(HEAP_NUMBER);
} else {
state_.Add(GENERIC);
}
TraceTransition(old_state, state_);
}
Handle<Type> UnaryOpStub::GetType(Isolate* isolate) {
if (state_.Contains(GENERIC)) {
return handle(Type::Any(), isolate);
}
Handle<Type> type = handle(Type::None(), isolate);
if (state_.Contains(SMI)) {
type = handle(
Type::Union(type, handle(Type::Smi(), isolate)), isolate);
}
if (state_.Contains(HEAP_NUMBER)) {
type = handle(
Type::Union(type, handle(Type::Double(), isolate)), isolate);
}
return type;
}
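(Illustrative aside, not part of the diff: UpdateStatus/GetType above accumulate the operand kinds a unary op has seen in an EnumSet and report their union as the stub's specialized type. A hypothetical sketch of that feedback lattice with plain bit flags; the unary-minus -0 case is reduced to a comment and everything else is simplified.)

#include <cstdio>

enum ObservedKind { kSmi = 1 << 0, kHeapNumber = 1 << 1, kGeneric = 1 << 2 };

struct UnaryFeedbackSketch {
  unsigned bits;
  UnaryFeedbackSketch() : bits(0) {}
  void Observe(bool is_smi, bool is_heap_number) {
    if (is_smi) {
      bits |= kSmi;      // the real stub also adds HEAP_NUMBER for -0 under unary minus
    } else if (is_heap_number) {
      bits |= kHeapNumber;
    } else {
      bits |= kGeneric;  // anything else makes the stub generic
    }
  }
  const char* TypeName() const {
    if (bits & kGeneric) return "Any";
    if ((bits & kSmi) && (bits & kHeapNumber)) return "Smi|Double";
    if (bits & kSmi) return "Smi";
    if (bits & kHeapNumber) return "Double";
    return "None";
  }
};

int main() {
  UnaryFeedbackSketch s;
  std::printf("%s\n", s.TypeName());  // None
  s.Observe(true, false);
  std::printf("%s\n", s.TypeName());  // Smi
  s.Observe(false, true);
  std::printf("%s\n", s.TypeName());  // Smi|Double
  return 0;
}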
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
@ -275,6 +354,29 @@ void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
#undef __
void UnaryOpStub::PrintBaseName(StringStream* stream) {
CodeStub::PrintBaseName(stream);
if (operation_ == Token::SUB) stream->Add("Minus");
if (operation_ == Token::BIT_NOT) stream->Add("Not");
}
void UnaryOpStub::PrintState(StringStream* stream) {
state_.Print(stream);
}
void UnaryOpStub::State::Print(StringStream* stream) const {
stream->Add("(");
SimpleListPrinter printer(stream);
if (IsEmpty()) printer.Add("None");
if (Contains(GENERIC)) printer.Add("Generic");
if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
if (Contains(SMI)) printer.Add("Smi");
stream->Add(")");
}
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@ -431,8 +533,9 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
}
void CompareNilICStub::Record(Handle<Object> object) {
ASSERT(state_ != State::Generic());
void CompareNilICStub::UpdateStatus(Handle<Object> object) {
ASSERT(!state_.Contains(GENERIC));
State old_state(state_);
if (object->IsNull()) {
state_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
@ -440,24 +543,30 @@ void CompareNilICStub::Record(Handle<Object> object) {
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
state_ = State::Generic();
state_.RemoveAll();
state_.Add(GENERIC);
} else if (IsMonomorphic()) {
state_ = State::Generic();
state_.RemoveAll();
state_.Add(GENERIC);
} else {
state_.Add(MONOMORPHIC_MAP);
}
TraceTransition(old_state, state_);
}
void CompareNilICStub::State::TraceTransition(State to) const {
template<class StateType>
void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
stream.Add("[CompareNilIC : ");
Print(&stream);
stream.Add("[");
PrintBaseName(&stream);
stream.Add(": ");
from.Print(&stream);
stream.Add("=>");
to.Print(&stream);
stream.Add("]\n");
@ -466,11 +575,15 @@ void CompareNilICStub::State::TraceTransition(State to) const {
}
void CompareNilICStub::PrintName(StringStream* stream) {
stream->Add("CompareNilICStub_");
void CompareNilICStub::PrintBaseName(StringStream* stream) {
CodeStub::PrintBaseName(stream);
stream->Add((nil_value_ == kNullValue) ? "(NullValue)":
"(UndefinedValue)");
}
void CompareNilICStub::PrintState(StringStream* stream) {
state_.Print(stream);
stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
"(UndefinedValue|");
}
@ -481,33 +594,28 @@ void CompareNilICStub::State::Print(StringStream* stream) const {
if (Contains(UNDEFINED)) printer.Add("Undefined");
if (Contains(NULL_TYPE)) printer.Add("Null");
if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
if (Contains(UNDETECTABLE)) printer.Add("Undetectable");
if (Contains(GENERIC)) printer.Add("Generic");
stream->Add(")");
}
Handle<Type> CompareNilICStub::StateToType(
Handle<Type> CompareNilICStub::GetType(
Isolate* isolate,
State state,
Handle<Map> map) {
if (state.Contains(CompareNilICStub::GENERIC)) {
if (state_.Contains(CompareNilICStub::GENERIC)) {
return handle(Type::Any(), isolate);
}
Handle<Type> result(Type::None(), isolate);
if (state.Contains(CompareNilICStub::UNDEFINED)) {
if (state_.Contains(CompareNilICStub::UNDEFINED)) {
result = handle(Type::Union(result, handle(Type::Undefined(), isolate)),
isolate);
}
if (state.Contains(CompareNilICStub::NULL_TYPE)) {
if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
result = handle(Type::Union(result, handle(Type::Null(), isolate)),
isolate);
}
if (state.Contains(CompareNilICStub::UNDETECTABLE)) {
result = handle(Type::Union(result, handle(Type::Undetectable(), isolate)),
isolate);
} else if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
Type* type = map.is_null() ? Type::Detectable() : Type::Class(map);
result = handle(Type::Union(result, handle(type, isolate)), isolate);
}
@ -516,6 +624,16 @@ Handle<Type> CompareNilICStub::StateToType(
}
Handle<Type> CompareNilICStub::GetInputType(
Isolate* isolate,
Handle<Map> map) {
Handle<Type> output_type = GetType(isolate, map);
Handle<Type> nil_type = handle(nil_value_ == kNullValue
? Type::Null() : Type::Undefined(), isolate);
return handle(Type::Union(output_type, nil_type), isolate);
}
void InstanceofStub::PrintName(StringStream* stream) {
const char* args = "";
if (HasArgsInRegisters()) {
@ -552,6 +670,12 @@ void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
}
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
CreateAllocationSiteStub stub;
stub.GetCode(isolate)->set_is_pregenerated(true);
}
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
@ -615,16 +739,15 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
bool ToBooleanStub::Record(Handle<Object> object) {
bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
Types old_types(types_);
bool to_boolean_value = types_.Record(object);
old_types.TraceTransition(types_);
bool to_boolean_value = types_.UpdateStatus(object);
TraceTransition(old_types, types_);
return to_boolean_value;
}
void ToBooleanStub::PrintName(StringStream* stream) {
stream->Add("ToBooleanStub_");
void ToBooleanStub::PrintState(StringStream* stream) {
types_.Print(stream);
}
@ -645,24 +768,7 @@ void ToBooleanStub::Types::Print(StringStream* stream) const {
}
void ToBooleanStub::Types::TraceTransition(Types to) const {
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
stream.Add("[ToBooleanIC : ");
Print(&stream);
stream.Add("=>");
to.Print(&stream);
stream.Add("]\n");
stream.OutputToStdOut();
#endif
}
bool ToBooleanStub::Types::Record(Handle<Object> object) {
bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
if (object->IsUndefined()) {
Add(UNDEFINED);
return false;
@ -712,9 +818,9 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
}
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
void ElementsTransitionAndStorePlatformStub::Generate(MacroAssembler* masm) {
Label fail;
AllocationSiteMode mode = AllocationSiteInfo::GetMode(from_, to_);
AllocationSiteMode mode = AllocationSite::GetMode(from_, to_);
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (IsFastSmiOrObjectElementsKind(to_)) {

377
deps/v8/src/code-stubs.h

@ -66,11 +66,13 @@ namespace internal {
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
V(FastCloneShallowObject) \
V(CreateAllocationSite) \
V(ToBoolean) \
V(ToNumber) \
V(ArgumentsAccess) \
V(RegExpConstructResult) \
V(NumberToString) \
V(DoubleToI) \
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
@ -90,6 +92,7 @@ namespace internal {
V(ArrayConstructor) \
V(InternalArrayConstructor) \
V(ProfileEntryHook) \
V(StoreGlobal) \
/* IC Handler stubs */ \
V(LoadField) \
V(KeyedLoadField)
@ -123,8 +126,6 @@ namespace internal {
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
// Stub is base classes of all stubs.
class CodeStub BASE_EMBEDDED {
@ -140,6 +141,8 @@ class CodeStub BASE_EMBEDDED {
// Retrieve the code for the stub. Generate the code if needed.
Handle<Code> GetCode(Isolate* isolate);
// Retrieve the code for the stub, make and return a copy of the code.
Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate);
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
}
@ -197,6 +200,8 @@ class CodeStub BASE_EMBEDDED {
return -1;
}
virtual void PrintName(StringStream* stream);
protected:
static bool CanUseFPRegisters();
@ -208,6 +213,11 @@ class CodeStub BASE_EMBEDDED {
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintBaseName(StringStream* stream);
virtual void PrintState(StringStream* stream) { }
private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
@ -236,10 +246,6 @@ class CodeStub BASE_EMBEDDED {
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@ -354,6 +360,9 @@ class HydrogenCodeStub : public CodeStub {
Handle<Code> GenerateLightweightMissCode(Isolate* isolate);
template<class StateType>
void TraceTransition(StateType from, StateType to);
private:
class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
@ -384,6 +393,22 @@ class RuntimeCallHelper {
DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
};
// TODO(bmeurer): Move to the StringAddStub declaration once we're
// done with the translation to a hydrogen code stub.
enum StringAddFlags {
// Omit both parameter checks.
STRING_ADD_CHECK_NONE = 0,
// Check left parameter.
STRING_ADD_CHECK_LEFT = 1 << 0,
// Check right parameter.
STRING_ADD_CHECK_RIGHT = 1 << 1,
// Check both parameters.
STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
// Stub needs a frame before calling the runtime
STRING_ADD_ERECT_FRAME = 1 << 2
};
} } // namespace v8::internal
#if V8_TARGET_ARCH_IA32
@ -519,6 +544,117 @@ class FastNewBlockContextStub : public PlatformCodeStub {
int MinorKey() { return slots_; }
};
class StoreGlobalStub : public HydrogenCodeStub {
public:
StoreGlobalStub(StrictModeFlag strict_mode, bool is_constant) {
bit_field_ = StrictModeBits::encode(strict_mode) |
IsConstantBits::encode(is_constant);
}
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
virtual Code::Kind GetCodeKind() const { return Code::STORE_IC; }
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
virtual Code::ExtraICState GetExtraICState() { return bit_field_; }
bool is_constant() {
return IsConstantBits::decode(bit_field_);
}
void set_is_constant(bool value) {
bit_field_ = IsConstantBits::update(bit_field_, value);
}
Representation representation() {
return Representation::FromKind(RepresentationBits::decode(bit_field_));
}
void set_representation(Representation r) {
bit_field_ = RepresentationBits::update(bit_field_, r.kind());
}
private:
virtual int NotMissMinorKey() { return GetExtraICState(); }
Major MajorKey() { return StoreGlobal; }
class StrictModeBits: public BitField<StrictModeFlag, 0, 1> {};
class IsConstantBits: public BitField<bool, 1, 1> {};
class RepresentationBits: public BitField<Representation::Kind, 2, 8> {};
int bit_field_;
DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
};
class UnaryOpStub : public HydrogenCodeStub {
public:
// Stub without type info available -> construct uninitialized
explicit UnaryOpStub(Token::Value operation)
: HydrogenCodeStub(UNINITIALIZED), operation_(operation) { }
explicit UnaryOpStub(Code::ExtraICState ic_state) :
state_(StateBits::decode(ic_state)),
operation_(OperatorBits::decode(ic_state)) { }
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
virtual Code::Kind GetCodeKind() const { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
if (state_.Contains(GENERIC)) {
return MEGAMORPHIC;
} else if (state_.IsEmpty()) {
return PREMONOMORPHIC;
} else {
return MONOMORPHIC;
}
}
virtual Code::ExtraICState GetExtraICState() {
return OperatorBits::encode(operation_) |
StateBits::encode(state_.ToIntegral());
}
Token::Value operation() { return operation_; }
Handle<JSFunction> ToJSFunction(Isolate* isolate);
Builtins::JavaScript ToJSBuiltin();
void UpdateStatus(Handle<Object> object);
MaybeObject* Result(Handle<Object> object, Isolate* isolate);
Handle<Code> GenerateCode();
Handle<Type> GetType(Isolate* isolate);
protected:
void PrintState(StringStream* stream);
void PrintBaseName(StringStream* stream);
private:
enum UnaryOpType {
SMI,
HEAP_NUMBER,
GENERIC,
NUMBER_OF_TYPES
};
class State : public EnumSet<UnaryOpType, byte> {
public:
State() : EnumSet<UnaryOpType, byte>() { }
explicit State(byte bits) : EnumSet<UnaryOpType, byte>(bits) { }
void Print(StringStream* stream) const;
};
class StateBits : public BitField<int, 0, NUMBER_OF_TYPES> { };
class OperatorBits : public BitField<Token::Value, NUMBER_OF_TYPES, 8> { };
State state_;
Token::Value operation_;
virtual CodeStub::Major MajorKey() { return UnaryOp; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
};
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
@ -620,6 +756,28 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
};
class CreateAllocationSiteStub : public HydrogenCodeStub {
public:
explicit CreateAllocationSiteStub() { }
virtual Handle<Code> GenerateCode();
virtual bool IsPregenerated() { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
private:
Major MajorKey() { return CreateAllocationSite; }
int NotMissMinorKey() { return 0; }
DISALLOW_COPY_AND_ASSIGN(CreateAllocationSiteStub);
};
class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
@ -1119,50 +1277,16 @@ class ICCompareStub: public PlatformCodeStub {
class CompareNilICStub : public HydrogenCodeStub {
public:
enum CompareNilType {
UNDEFINED,
NULL_TYPE,
MONOMORPHIC_MAP,
UNDETECTABLE,
GENERIC,
NUMBER_OF_TYPES
};
class State : public EnumSet<CompareNilType, byte> {
public:
State() : EnumSet<CompareNilType, byte>(0) { }
explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
static State Generic() {
State set;
set.Add(UNDEFINED);
set.Add(NULL_TYPE);
set.Add(UNDETECTABLE);
set.Add(GENERIC);
return set;
}
Handle<Type> GetType(Isolate* isolate, Handle<Map> map = Handle<Map>());
Handle<Type> GetInputType(Isolate* isolate, Handle<Map> map);
void Print(StringStream* stream) const;
void TraceTransition(State to) const;
};
static Handle<Type> StateToType(
Isolate* isolate, State state, Handle<Map> map = Handle<Map>());
// At most 6 different types can be distinguished, because the Code object
// only has room for a single byte to hold a set and there are two more
// boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
CompareNilICStub(NilValue nil, State state = State())
: nil_value_(nil), state_(state) {
}
explicit CompareNilICStub(NilValue nil) : nil_value_(nil) { }
CompareNilICStub(Code::ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
: HydrogenCodeStub(init_state) {
nil_value_ = NilValueField::decode(ic_state);
state_ = State(ExtractTypesFromExtraICState(ic_state));
: HydrogenCodeStub(init_state),
nil_value_(NilValueField::decode(ic_state)),
state_(State(TypesField::decode(ic_state))) {
}
static Handle<Code> GetUninitialized(Isolate* isolate,
@ -1182,7 +1306,7 @@ class CompareNilICStub : public HydrogenCodeStub {
}
virtual InlineCacheState GetICState() {
if (state_ == State::Generic()) {
if (state_.Contains(GENERIC)) {
return MEGAMORPHIC;
} else if (state_.Contains(MONOMORPHIC_MAP)) {
return MONOMORPHIC;
@ -1195,35 +1319,49 @@ class CompareNilICStub : public HydrogenCodeStub {
Handle<Code> GenerateCode();
// extra ic state = nil_value | type_n-1 | ... | type_0
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) | state_.ToIntegral();
}
static byte ExtractTypesFromExtraICState(Code::ExtraICState state) {
return state & ((1 << NUMBER_OF_TYPES) - 1);
}
static NilValue ExtractNilValueFromExtraICState(Code::ExtraICState state) {
return NilValueField::decode(state);
return NilValueField::encode(nil_value_) |
TypesField::encode(state_.ToIntegral());
}
void Record(Handle<Object> object);
void UpdateStatus(Handle<Object> object);
bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
NilValue GetNilValue() const { return nil_value_; }
State GetState() const { return state_; }
void ClearState() { state_.RemoveAll(); }
virtual void PrintName(StringStream* stream);
virtual void PrintState(StringStream* stream);
virtual void PrintBaseName(StringStream* stream);
private:
friend class CompareNilIC;
enum CompareNilType {
UNDEFINED,
NULL_TYPE,
MONOMORPHIC_MAP,
GENERIC,
NUMBER_OF_TYPES
};
// At most 6 different types can be distinguished, because the Code object
// only has room for a single byte to hold a set and there are two more
// boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
class State : public EnumSet<CompareNilType, byte> {
public:
State() : EnumSet<CompareNilType, byte>(0) { }
explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
void Print(StringStream* stream) const;
};
CompareNilICStub(NilValue nil, InitializationState init_state)
: HydrogenCodeStub(init_state) {
nil_value_ = nil;
}
: HydrogenCodeStub(init_state), nil_value_(nil) { }
class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
class NilValueField : public BitField<NilValue, 0, 1> {};
class TypesField : public BitField<byte, 1, NUMBER_OF_TYPES> {};
virtual CodeStub::Major MajorKey() { return CompareNilIC; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
@ -1625,6 +1763,60 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
};
class DoubleToIStub : public PlatformCodeStub {
public:
DoubleToIStub(Register source,
Register destination,
int offset,
bool is_truncating) : bit_field_(0) {
bit_field_ = SourceRegisterBits::encode(source.code_) |
DestinationRegisterBits::encode(destination.code_) |
OffsetBits::encode(offset) |
IsTruncatingBits::encode(is_truncating);
}
Register source() {
Register result = { SourceRegisterBits::decode(bit_field_) };
return result;
}
Register destination() {
Register result = { DestinationRegisterBits::decode(bit_field_) };
return result;
}
bool is_truncating() {
return IsTruncatingBits::decode(bit_field_);
}
int offset() {
return OffsetBits::decode(bit_field_);
}
void Generate(MacroAssembler* masm);
private:
static const int kBitsPerRegisterNumber = 6;
STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
class SourceRegisterBits:
public BitField<int, 0, kBitsPerRegisterNumber> {}; // NOLINT
class DestinationRegisterBits:
public BitField<int, kBitsPerRegisterNumber,
kBitsPerRegisterNumber> {}; // NOLINT
class IsTruncatingBits:
public BitField<bool, 2 * kBitsPerRegisterNumber, 1> {}; // NOLINT
class OffsetBits:
public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT
Major MajorKey() { return DoubleToI; }
int MinorKey() { return bit_field_; }
int bit_field_;
DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
};
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
@ -1755,7 +1947,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
AllocationSiteOverrideModeBits::encode(override_mode) |
ContextCheckModeBits::encode(context_mode);
@ -2009,8 +2201,7 @@ class ToBooleanStub: public HydrogenCodeStub {
byte ToByte() const { return ToIntegral(); }
void Print(StringStream* stream) const;
void TraceTransition(Types to) const;
bool Record(Handle<Object> object);
bool UpdateStatus(Handle<Object> object);
bool NeedsMap() const;
bool CanBeUndetectable() const;
bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
@ -2023,7 +2214,7 @@ class ToBooleanStub: public HydrogenCodeStub {
explicit ToBooleanStub(Code::ExtraICState state)
: types_(static_cast<byte>(state)) { }
bool Record(Handle<Object> object);
bool UpdateStatus(Handle<Object> object);
Types GetTypes() { return types_; }
virtual Handle<Code> GenerateCode();
@ -2032,7 +2223,7 @@ class ToBooleanStub: public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
virtual void PrintState(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
@ -2070,9 +2261,55 @@ class ToBooleanStub: public HydrogenCodeStub {
};
class ElementsTransitionAndStoreStub : public PlatformCodeStub {
class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
public:
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsTransitionAndStoreStub(ElementsKind from_kind,
ElementsKind to_kind,
bool is_jsarray,
KeyedAccessStoreMode store_mode)
: from_kind_(from_kind),
to_kind_(to_kind),
is_jsarray_(is_jsarray),
store_mode_(store_mode) {}
ElementsKind from_kind() const { return from_kind_; }
ElementsKind to_kind() const { return to_kind_; }
bool is_jsarray() const { return is_jsarray_; }
KeyedAccessStoreMode store_mode() const { return store_mode_; }
Handle<Code> GenerateCode();
void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
private:
class FromBits: public BitField<ElementsKind, 0, 8> {};
class ToBits: public BitField<ElementsKind, 8, 8> {};
class IsJSArrayBits: public BitField<bool, 16, 1> {};
class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {};
Major MajorKey() { return ElementsTransitionAndStore; }
int NotMissMinorKey() {
return FromBits::encode(from_kind_) |
ToBits::encode(to_kind_) |
IsJSArrayBits::encode(is_jsarray_) |
StoreModeBits::encode(store_mode_);
}
ElementsKind from_kind_;
ElementsKind to_kind_;
bool is_jsarray_;
KeyedAccessStoreMode store_mode_;
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
};
// TODO(bmeurer) Remove this when compiled transitions is enabled
class ElementsTransitionAndStorePlatformStub : public PlatformCodeStub {
public:
ElementsTransitionAndStorePlatformStub(ElementsKind from,
ElementsKind to,
bool is_jsarray,
StrictModeFlag strict_mode,
@ -2107,7 +2344,7 @@ class ElementsTransitionAndStoreStub : public PlatformCodeStub {
StrictModeFlag strict_mode_;
KeyedAccessStoreMode store_mode_;
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStorePlatformStub);
};
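(Illustrative aside, not part of the diff: several of the new stubs above — StoreGlobalStub, DoubleToIStub, ElementsTransitionAndStoreStub — pack their configuration into one integer key via BitField<T, shift, size>. A minimal stand-in for that encode/decode/update idiom, using the DoubleToIStub layout as the example; this is not V8's actual utils.h implementation.)

#include <cassert>
#include <stdint.h>

template <class T, int shift, int size>
struct BitFieldSketch {
  static uint32_t Mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t packed) { return static_cast<T>((packed & Mask()) >> shift); }
  static uint32_t update(uint32_t packed, T value) {
    return (packed & ~Mask()) | encode(value);
  }
};

// Layout mirroring DoubleToIStub: two 6-bit register codes, a truncation
// flag, and a 3-bit offset, all in one minor key.
typedef BitFieldSketch<int, 0, 6>   SourceRegisterBits;
typedef BitFieldSketch<int, 6, 6>   DestinationRegisterBits;
typedef BitFieldSketch<bool, 12, 1> IsTruncatingBits;
typedef BitFieldSketch<int, 13, 3>  OffsetBits;

int main() {
  uint32_t key = SourceRegisterBits::encode(3) |
                 DestinationRegisterBits::encode(5) |
                 IsTruncatingBits::encode(true) |
                 OffsetBits::encode(2);
  assert(SourceRegisterBits::decode(key) == 3);
  assert(DestinationRegisterBits::decode(key) == 5);
  assert(IsTruncatingBits::decode(key));
  assert(OffsetBits::decode(key) == 2);
  key = OffsetBits::update(key, 7);
  assert(OffsetBits::decode(key) == 7);
  return 0;
}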

4
deps/v8/src/codegen.h

@ -97,10 +97,10 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
// If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
// |allocation_site_info_found| may be NULL.
// |allocation_memento_found| may be NULL.
static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
AllocationSiteMode mode,
Label* allocation_site_info_found);
Label* allocation_memento_found);
static void GenerateSmiToDouble(MacroAssembler* masm,
AllocationSiteMode mode,
Label* fail);

92
deps/v8/src/collection.js

@ -34,6 +34,7 @@
var $Set = global.Set;
var $Map = global.Map;
var $WeakMap = global.WeakMap;
var $WeakSet = global.WeakSet;
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
@ -240,7 +241,7 @@ SetUpMap();
function WeakMapConstructor() {
if (%_IsConstructCall()) {
%WeakMapInitialize(this);
%WeakCollectionInitialize(this);
} else {
return new $WeakMap();
}
@ -255,7 +256,7 @@ function WeakMapGet(key) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapGet(this, key);
return %WeakCollectionGet(this, key);
}
@ -267,7 +268,7 @@ function WeakMapSet(key, value) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapSet(this, key, value);
return %WeakCollectionSet(this, key, value);
}
@ -279,7 +280,7 @@ function WeakMapHas(key) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapHas(this, key);
return %WeakCollectionHas(this, key);
}
@ -291,7 +292,7 @@ function WeakMapDelete(key) {
if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakMapDelete(this, key);
return %WeakCollectionDelete(this, key);
}
@ -301,7 +302,7 @@ function WeakMapClear() {
['WeakMap.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
%WeakMapInitialize(this);
%WeakCollectionInitialize(this);
}
@ -325,3 +326,82 @@ function SetUpWeakMap() {
}
SetUpWeakMap();
// -------------------------------------------------------------------
// Harmony WeakSet
function WeakSetConstructor() {
if (%_IsConstructCall()) {
%WeakCollectionInitialize(this);
} else {
return new $WeakSet();
}
}
function WeakSetAdd(value) {
if (!IS_WEAKSET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.add', this]);
}
if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
throw %MakeTypeError('invalid_weakset_value', [this, value]);
}
return %WeakCollectionSet(this, value, true);
}
function WeakSetHas(value) {
if (!IS_WEAKSET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.has', this]);
}
if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
throw %MakeTypeError('invalid_weakset_value', [this, value]);
}
return %WeakCollectionHas(this, value);
}
function WeakSetDelete(value) {
if (!IS_WEAKSET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.delete', this]);
}
if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
throw %MakeTypeError('invalid_weakset_value', [this, value]);
}
return %WeakCollectionDelete(this, value);
}
function WeakSetClear() {
if (!IS_WEAKSET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
%WeakCollectionInitialize(this);
}
// -------------------------------------------------------------------
function SetUpWeakSet() {
%CheckIsBootstrapping();
%SetCode($WeakSet, WeakSetConstructor);
%FunctionSetPrototype($WeakSet, new $Object());
%SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
// Set up the non-enumerable functions on the WeakSet prototype object.
InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
"add", WeakSetAdd,
"has", WeakSetHas,
"delete", WeakSetDelete,
"clear", WeakSetClear
));
}
SetUpWeakSet();

1
deps/v8/src/compilation-cache.cc

@ -86,6 +86,7 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
return result;
}
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {

9
deps/v8/src/compiler.cc

@ -449,6 +449,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
return SetLastStatus(SUCCEEDED);
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
@ -564,8 +565,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
if (info->is_eval()) {
StackTraceFrameIterator it(isolate);
if (!it.done()) {
script->set_eval_from_shared(
JSFunction::cast(it.frame()->function())->shared());
script->set_eval_from_shared(it.frame()->function()->shared());
Code* code = it.frame()->LookupCode();
int offset = static_cast<int>(
it.frame()->pc() - code->instruction_start());
@ -1199,9 +1199,9 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
Handle<Code> code = info->code();
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
USE(line_num);
if (script->name()->IsString()) {
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
@ -1215,7 +1215,8 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*code,
*shared,
info,
shared->DebugName()));
info->isolate()->heap()->empty_string(),
line_num));
}
}

2
deps/v8/src/compiler.h

@ -560,8 +560,6 @@ class OptimizingCompiler: public ZoneObject {
class Compiler : public AllStatic {
public:
static const int kMaxInliningLevels = 3;
// Call count before primitive functions trigger their own optimization.
static const int kCallsUntilPrimitiveOpt = 200;

6
deps/v8/src/contexts.cc

@ -88,6 +88,7 @@ JSObject* Context::global_proxy() {
return native_context()->global_proxy_object();
}
void Context::set_global_proxy(JSObject* object) {
native_context()->set_global_proxy_object(object);
}
@ -123,7 +124,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
if (context->IsNativeContext() ||
context->IsWithContext() ||
(context->IsFunctionContext() && context->has_extension())) {
Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
Handle<JSReceiver> object(
JSReceiver::cast(context->extension()), isolate);
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
@ -133,6 +135,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
} else {
*attributes = object->GetPropertyAttribute(*name);
}
if (isolate->has_pending_exception()) return Handle<Object>();
if (*attributes != ABSENT) {
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",

28
deps/v8/src/conversions-inl.h

@ -515,6 +515,32 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
end,
false,
allow_trailing_junk);
// It could be an explicit octal value.
} else if ((flags & ALLOW_OCTAL) && (*current == 'o' || *current == 'O')) {
++current;
if (current == end || !isDigit(*current, 8) || sign != NONE) {
return JunkStringValue(); // "0o".
}
return InternalStringToIntDouble<3>(unicode_cache,
current,
end,
false,
allow_trailing_junk);
// It could be a binary value.
} else if ((flags & ALLOW_BINARY) && (*current == 'b' || *current == 'B')) {
++current;
if (current == end || !isBinaryDigit(*current) || sign != NONE) {
return JunkStringValue(); // "0b".
}
return InternalStringToIntDouble<1>(unicode_cache,
current,
end,
false,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
@ -524,7 +550,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
}
}
bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
bool octal = leading_zero && (flags & ALLOW_IMPLICIT_OCTAL) != 0;
// Copy significant digits of the integer part (if any) to the buffer.
while (*current >= '0' && *current <= '9') {

11
deps/v8/src/conversions.h

@ -52,6 +52,11 @@ inline bool isDigit(int x, int radix) {
}
inline bool isBinaryDigit(int x) {
return x == '0' || x == '1';
}
// The fast double-to-(unsigned-)int conversion routine does not guarantee
// rounding towards zero.
// For NaN and values outside the int range, return INT_MIN or INT_MAX.
@ -108,8 +113,10 @@ inline uint32_t DoubleToUint32(double x) {
enum ConversionFlags {
NO_FLAGS = 0,
ALLOW_HEX = 1,
ALLOW_OCTALS = 2,
ALLOW_TRAILING_JUNK = 4
ALLOW_OCTAL = 2,
ALLOW_IMPLICIT_OCTAL = 4,
ALLOW_BINARY = 8,
ALLOW_TRAILING_JUNK = 16
};
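The ALLOW_OCTALS flag is split here into ALLOW_OCTAL (explicit 0o/0O prefix) and ALLOW_IMPLICIT_OCTAL (legacy leading zero), and ALLOW_BINARY is added for 0b/0B; the InternalStringToIntDouble template argument appears to be log2 of the radix (4 for hex, 3 for octal, 1 for binary). Together with the --harmony_numeric_literals flag added in flag-definitions.h further down, this backs the ES6-style octal and binary forms. A minimal embedder sketch only, assuming the 3.20-era public API (Isolate::GetCurrent, String::New, Script::Compile, Context::New(isolate)) seen elsewhere in this diff; setup details may differ:

#include <cstdio>
#include <v8.h>

int main() {
  // Assumption: the harmony flag must be enabled for 0o/0b literals.
  const char flags[] = "--harmony_numeric_literals";
  v8::V8::SetFlagsFromString(flags, static_cast<int>(sizeof(flags) - 1));

  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);

  // 0o77 and 0b101 are the new explicit octal/binary literals; hex already
  // worked. Expected output with the flag enabled: 63,5,16
  v8::Local<v8::Value> result = v8::Script::Compile(
      v8::String::New("[0o77, 0b101, 0x10].join(',')"))->Run();
  v8::String::Utf8Value utf8(result);
  std::printf("%s\n", *utf8);
  return 0;
}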

2
deps/v8/src/counters.cc

@ -56,6 +56,7 @@ void* Histogram::CreateHistogram() const {
CreateHistogram(name_, min_, max_, num_buckets_);
}
// Start the timer.
void HistogramTimer::Start() {
if (Enabled()) {
@ -67,6 +68,7 @@ void HistogramTimer::Start() {
}
}
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {

2
deps/v8/src/cpu-profiler-inl.h

@ -70,7 +70,7 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
new(ticks_buffer_.Enqueue()) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;
}

84
deps/v8/src/cpu-profiler.cc

@ -52,18 +52,18 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
enqueue_order_(0) {
last_code_event_id_(0), last_processed_code_event_id_(0) {
}
void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
event.generic.order = ++enqueue_order_;
event.generic.order = ++last_code_event_id_;
events_buffer_.Enqueue(event);
}
void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
TickSampleEventRecord record(enqueue_order_);
TickSampleEventRecord record(last_code_event_id_);
TickSample* sample = &record.sample;
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
@ -76,7 +76,14 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
}
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
void ProfilerEventsProcessor::StopSynchronously() {
if (!running_) return;
running_ = false;
Join();
}
bool ProfilerEventsProcessor::ProcessCodeEvent() {
CodeEventsContainer record;
if (events_buffer_.Dequeue(&record)) {
switch (record.generic.type) {
@ -90,17 +97,18 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
#undef PROFILER_TYPE_CASE
default: return true; // Skip record.
}
*dequeue_order = record.generic.order;
last_processed_code_event_id_ = record.generic.order;
return true;
}
return false;
}
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
bool ProfilerEventsProcessor::ProcessTicks() {
while (true) {
if (!ticks_from_vm_buffer_.IsEmpty()
&& ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
&& ticks_from_vm_buffer_.Peek()->order ==
last_processed_code_event_id_) {
TickSampleEventRecord record;
ticks_from_vm_buffer_.Dequeue(&record);
generator_->RecordTickSample(record.sample);
@ -115,7 +123,8 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
// will get far behind, a record may be modified right under its
// feet.
TickSampleEventRecord record = *rec;
if (record.order == dequeue_order) {
if (record.order != last_processed_code_event_id_) return true;
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
if (record.sample.frames_count < 0
@ -123,48 +132,37 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
record.sample.frames_count = 0;
generator_->RecordTickSample(record.sample);
ticks_buffer_.FinishDequeue();
} else {
return true;
}
}
}
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
// Process ticks until we have any.
if (ProcessTicks(dequeue_order)) {
// All ticks of the current dequeue_order are processed,
if (ProcessTicks()) {
// All ticks of the current last_processed_code_event_id_ are processed,
// proceed to the next code event.
ProcessCodeEvent(&dequeue_order);
ProcessCodeEvent();
}
YieldCPU();
}
// Process remaining tick events.
ticks_buffer_.FlushResidualRecords();
// Perform processing until we have tick events, skip remaining code events.
while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
do {
ProcessTicks();
} while (ProcessCodeEvent());
}
int CpuProfiler::GetProfilesCount() {
// The count of profiles doesn't depend on a security token.
return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length();
return profiles_->profiles()->length();
}
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
const int token = token_enumerator_->GetTokenId(security_token);
return profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
const int token = token_enumerator_->GetTokenId(security_token);
return profiles_->GetProfile(token, uid);
CpuProfile* CpuProfiler::GetProfile(int index) {
return profiles_->profiles()->at(index);
}
@ -186,11 +184,6 @@ void CpuProfiler::DeleteProfile(CpuProfile* profile) {
}
bool CpuProfiler::HasDetachedProfiles() {
return profiles_->HasDetachedProfiles();
}
static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag) {
return FLAG_prof_browser_mode
&& (tag != Logger::CALLBACK_TAG
@ -208,8 +201,7 @@ void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
rec->start = entry_point;
rec->entry = profiles_->NewCodeEntry(
Logger::CALLBACK_TAG,
profiles_->GetName(name),
TokenEnumerator::kInheritsSecurityToken);
profiles_->GetName(name));
rec->size = 1;
rec->shared = NULL;
processor_->Enqueue(evt_rec);
@ -280,7 +272,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry = profiles_->NewCodeEntry(
tag,
profiles_->GetFunctionName(shared->DebugName()),
TokenEnumerator::kNoSecurityToken,
CodeEntry::kEmptyNamePrefix,
profiles_->GetName(source),
line);
@ -306,7 +297,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->entry = profiles_->NewCodeEntry(
tag,
profiles_->GetName(args_count),
TokenEnumerator::kInheritsSecurityToken,
"args_count: ");
rec->size = code->ExecutableSize();
rec->shared = NULL;
@ -345,7 +335,6 @@ void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
rec->entry = profiles_->NewCodeEntry(
Logger::CALLBACK_TAG,
profiles_->GetName(name),
TokenEnumerator::kInheritsSecurityToken,
"get ");
rec->size = 1;
rec->shared = NULL;
@ -361,7 +350,6 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
rec->entry = profiles_->NewCodeEntry(
Logger::REG_EXP_TAG,
profiles_->GetName(source),
TokenEnumerator::kInheritsSecurityToken,
"RegExp: ");
rec->size = code->ExecutableSize();
processor_->Enqueue(evt_rec);
@ -376,7 +364,6 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
rec->entry = profiles_->NewCodeEntry(
Logger::CALLBACK_TAG,
profiles_->GetName(name),
TokenEnumerator::kInheritsSecurityToken,
"set ");
rec->size = 1;
rec->shared = NULL;
@ -388,7 +375,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate)
: isolate_(isolate),
profiles_(new CpuProfilesCollection()),
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(NULL),
processor_(NULL),
need_to_stop_sampler_(false),
@ -403,7 +389,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
: isolate_(isolate),
profiles_(test_profiles),
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(test_generator),
processor_(test_processor),
need_to_stop_sampler_(false),
@ -413,7 +398,6 @@ CpuProfiler::CpuProfiler(Isolate* isolate,
CpuProfiler::~CpuProfiler() {
ASSERT(!is_profiling_);
delete token_enumerator_;
delete profiles_;
}
@ -423,6 +407,7 @@ void CpuProfiler::ResetProfiles() {
profiles_ = new CpuProfilesCollection();
}
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) {
StartProcessorIfNotStarted();
@ -469,10 +454,7 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile(title);
CpuProfile* result =
profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
title,
actual_sampling_rate);
CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
if (result != NULL) {
result->Print();
}
@ -480,13 +462,12 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
}
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
CpuProfile* CpuProfiler::StopProfiling(String* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
int token = token_enumerator_->GetTokenId(security_token);
return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
return profiles_->StopProfiling(profile_title, actual_sampling_rate);
}
@ -504,8 +485,7 @@ void CpuProfiler::StopProcessor() {
need_to_stop_sampler_ = false;
}
is_profiling_ = false;
processor_->Stop();
processor_->Join();
processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;

32
deps/v8/src/cpu-profiler.h

@ -44,7 +44,6 @@ class CompilationInfo;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@ -111,18 +110,8 @@ class TickSampleEventRecord {
// The parameterless constructor is used when we dequeue data from
// the ticks buffer.
TickSampleEventRecord() { }
explicit TickSampleEventRecord(unsigned order)
: filler(1),
order(order) {
ASSERT(filler != SamplingCircularQueue::kClear);
}
explicit TickSampleEventRecord(unsigned order) : order(order) { }
// The first machine word of a TickSampleEventRecord must not ever
// become equal to SamplingCircularQueue::kClear. As both order and
// TickSample's first field are not reliable in this sense (order
// can overflow, TickSample can have all fields reset), we are
// forced to use an artificial filler field.
int filler;
unsigned order;
TickSample sample;
@ -156,7 +145,7 @@ class ProfilerEventsProcessor : public Thread {
// Thread control.
virtual void Run();
inline void Stop() { running_ = false; }
void StopSynchronously();
INLINE(bool running()) { return running_; }
void Enqueue(const CodeEventsContainer& event);
@ -171,15 +160,16 @@ class ProfilerEventsProcessor : public Thread {
private:
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
bool ProcessCodeEvent();
bool ProcessTicks();
ProfileGenerator* generator_;
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
unsigned last_code_event_id_;
unsigned last_processed_code_event_id_;
};
@ -208,13 +198,11 @@ class CpuProfiler {
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(Object* security_token, String* title);
CpuProfile* StopProfiling(String* title);
int GetProfilesCount();
CpuProfile* GetProfile(Object* security_token, int index);
CpuProfile* FindProfile(Object* security_token, unsigned uid);
CpuProfile* GetProfile(int index);
void DeleteAllProfiles();
void DeleteProfile(CpuProfile* profile);
bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler.)
TickSample* TickSampleEvent();
@ -251,6 +239,9 @@ class CpuProfiler {
return &is_profiling_;
}
ProfileGenerator* generator() const { return generator_; }
ProfilerEventsProcessor* processor() const { return processor_; }
private:
void StartProcessorIfNotStarted();
void StopProcessorIfLastProfile(const char* title);
@ -261,7 +252,6 @@ class CpuProfiler {
Isolate* isolate_;
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
TokenEnumerator* token_enumerator_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;

8
deps/v8/src/d8-debug.cc

@ -50,14 +50,12 @@ void PrintPrompt() {
}
void HandleDebugEvent(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<Value> data) {
void HandleDebugEvent(const Debug::EventDetails& event_details) {
// TODO(svenpanne) There should be a way to retrieve this in the callback.
Isolate* isolate = Isolate::GetCurrent();
HandleScope scope(isolate);
DebugEvent event = event_details.GetEvent();
// Check for handled event.
if (event != Break && event != Exception && event != AfterCompile) {
return;
@ -67,6 +65,7 @@ void HandleDebugEvent(DebugEvent event,
// Get the toJSONProtocol function on the event and get the JSON format.
Local<String> to_json_fun_name = String::New("toJSONProtocol");
Handle<Object> event_data = event_details.GetEventData();
Local<Function> to_json_fun =
Local<Function>::Cast(event_data->Get(to_json_fun_name));
Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL);
@ -91,6 +90,7 @@ void HandleDebugEvent(DebugEvent event,
// Get the debug command processor.
Local<String> fun_name = String::New("debugCommandProcessor");
Handle<Object> exec_state = event_details.GetExecutionState();
Local<Function> fun = Local<Function>::Cast(exec_state->Get(fun_name));
Local<Object> cmd_processor =
Local<Object>::Cast(fun->Call(exec_state, 0, NULL));

5
deps/v8/src/d8-debug.h

@ -36,10 +36,7 @@
namespace v8 {
void HandleDebugEvent(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<Value> data);
void HandleDebugEvent(const Debug::EventDetails& event_details);
// Start the remote debugger connecting to a V8 debugger agent on the specified
// port.

4
deps/v8/src/d8.cc

@ -810,7 +810,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
v8::Debug::SetDebugEventListener2(HandleDebugEvent);
}
#endif // ENABLE_DEBUGGER_SUPPORT
}
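The EventDetails-based callback replaces the old four-argument listener. A hedged sketch of the same pattern for an embedder, using only the accessors exercised in d8-debug.cc above (GetEvent, GetEventData, GetExecutionState); it assumes a build with ENABLE_DEBUGGER_SUPPORT and an isolate/context that are already set up:

#include <v8.h>
#include <v8-debug.h>

static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  // React to breakpoints only, mirroring the d8 handler above.
  if (details.GetEvent() != v8::Break) return;
  v8::Handle<v8::Object> exec_state = details.GetExecutionState();
  v8::Handle<v8::Object> event_data = details.GetEventData();
  // Query debugger state through exec_state / event_data here.
  (void)exec_state;
  (void)event_data;
}

void InstallDebugListener() {
  // Registration mirrors the d8.cc change above.
  v8::Debug::SetDebugEventListener2(OnDebugEvent);
}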
@ -1087,6 +1087,7 @@ static void ReadBufferWeakCallback(v8::Isolate* isolate,
array_buffer->Dispose();
}
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
@ -1581,6 +1582,7 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU();
#ifndef V8_SHARED
i::FLAG_harmony_array_buffer = true;
i::FLAG_harmony_typed_arrays = true;

14
deps/v8/src/d8.gyp

@ -31,8 +31,9 @@
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 0,
},
'includes': ['../build/common.gypi'],
'includes': ['../build/toolchain.gypi', '../build/features.gypi'],
'targets': [
{
'target_name': 'd8',
@ -78,6 +79,17 @@
'../src/third_party/vtune/v8vtune.gyp:v8_vtune',
],
}],
['v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(DEPTH)/third_party/icu/icu.gyp:icudata',
],
}],
],
},
{

1
deps/v8/src/dateparser.cc

@ -112,6 +112,7 @@ bool DateParser::TimeComposer::Write(FixedArray* output) {
return true;
}
bool DateParser::TimeZoneComposer::Write(FixedArray* output) {
if (sign_ != kNone) {
if (hour_ == kNone) hour_ = 0;

21
deps/v8/src/debug.cc

@ -965,7 +965,7 @@ Object* Debug::Break(Arguments args) {
// Get the debug info (create it if it does not exist).
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
Handle<SharedFunctionInfo>(frame->function()->shared());
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Find the break point where execution has stopped.
@ -1348,8 +1348,7 @@ void Debug::FloodHandlerWithOneShot() {
JavaScriptFrame* frame = it.frame();
if (frame->HasHandler()) {
// Flood the function with the catch block with break points
JSFunction* function = JSFunction::cast(frame->function());
FloodWithOneShot(Handle<JSFunction>(function));
FloodWithOneShot(Handle<JSFunction>(frame->function()));
return;
}
}
@ -1415,13 +1414,13 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// breakpoints.
frames_it.Advance();
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
JSFunction* function = frames_it.frame()->function();
FloodWithOneShot(Handle<JSFunction>(function));
return;
}
// Get the debug info (create it if it does not exist).
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<JSFunction> function(frame->function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
@ -1486,15 +1485,14 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
frames_it.Advance();
}
// Skip builtin functions on the stack.
while (!frames_it.done() &&
JSFunction::cast(frames_it.frame()->function())->IsBuiltin()) {
while (!frames_it.done() && frames_it.frame()->function()->IsBuiltin()) {
frames_it.Advance();
}
// Step out: If there is a JavaScript caller frame, we need to
// flood it with breakpoints.
if (!frames_it.done()) {
// Fill the function to return to with one-shot break points.
JSFunction* function = JSFunction::cast(frames_it.frame()->function());
JSFunction* function = frames_it.frame()->function();
FloodWithOneShot(Handle<JSFunction>(function));
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
@ -1811,6 +1809,7 @@ void Debug::ClearStepping() {
thread_local_.step_count_ = 0;
}
// Clears all the one-shot break points that are currently set. Normally this
// function is called each time a break point is hit as one shot break points
// are used to support stepping.
@ -1907,7 +1906,7 @@ static void CollectActiveFunctionsFromThread(
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized()) {
List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
List<JSFunction*> functions(FLAG_max_inlining_levels + 1);
frame->GetFunctions(&functions);
for (int i = 0; i < functions.length(); i++) {
JSFunction* function = functions[i];
@ -1915,7 +1914,7 @@ static void CollectActiveFunctionsFromThread(
function->shared()->code()->set_gc_metadata(active_code_marker);
}
} else if (frame->function()->IsJSFunction()) {
JSFunction* function = JSFunction::cast(frame->function());
JSFunction* function = frame->function();
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
active_functions->Add(Handle<JSFunction>(function));
function->shared()->code()->set_gc_metadata(active_code_marker);
@ -1932,7 +1931,7 @@ static void RedirectActivationsToRecompiledCodeOnThread(
if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
JSFunction* function = JSFunction::cast(frame->function());
JSFunction* function = frame->function();
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);

39
deps/v8/src/deoptimizer.cc

@ -43,7 +43,13 @@ namespace internal {
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
OS::CommitPageSize(),
#if defined(__native_client__)
// The Native Client port of V8 uses an interpreter,
// so code pages don't need PROT_EXEC.
NOT_EXECUTABLE,
#else
EXECUTABLE,
#endif
NULL);
}
@ -186,7 +192,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
// Get the function and code from the frame.
JSFunction* function = JSFunction::cast(frame->function());
JSFunction* function = frame->function();
Code* code = frame->LookupCode();
// Locate the deoptimization point in the code. As we are at a call the
@ -542,6 +548,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
if (function->IsSmi()) {
function = NULL;
}
ASSERT(from != NULL);
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
if (bailout_type_ == Deoptimizer::SOFT) {
@ -573,13 +580,11 @@ Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
switch (bailout_type_) {
case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
ASSERT(from_ == NULL);
return function->code();
case Deoptimizer::LAZY: {
Code* compiled_code =
isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
return (compiled_code == NULL)
? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_))
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
case Deoptimizer::OSR: {
@ -1609,7 +1614,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
JavaScriptFrame* frame = it->frame();
Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate_);
Handle<JSFunction> function(frame->function(), isolate_);
Handle<JSObject> arguments;
for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
if (frame->GetExpression(i) == isolate_->heap()->arguments_marker()) {
@ -1619,11 +1624,9 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
if (arguments.is_null()) {
if (frame->has_adapted_arguments()) {
// Use the arguments adapter frame we just built to materialize the
// arguments object. FunctionGetArguments can't throw an exception,
// so cast away the doubt with an assert.
arguments = Handle<JSObject>(JSObject::cast(
Accessors::FunctionGetArguments(*function,
NULL)->ToObjectUnchecked()));
// arguments object. FunctionGetArguments can't throw an exception.
arguments = Handle<JSObject>::cast(
Accessors::FunctionGetArguments(function));
values.RewindBy(length);
} else {
// Construct an arguments object and copy the parameters to a newly
@ -2368,8 +2371,8 @@ void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
if (loop_depth == loop_nesting_level) {
uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
if (static_cast<int>(loop_depth) == loop_nesting_level) {
// Loop back edge has the loop depth that we want to patch.
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
@ -2400,8 +2403,8 @@ void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
if (loop_depth <= loop_nesting_level) {
uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
if (static_cast<int>(loop_depth) <= loop_nesting_level) {
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
RevertInterruptCodeAt(unoptimized_code,
@ -2432,13 +2435,13 @@ void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
CHECK_LE(loop_depth, Code::kMaxLoopNestingMarker);
uint32_t loop_depth = Memory::uint32_at(back_edge_cursor + 2 * kIntSize);
CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
CHECK_EQ((loop_depth <= loop_nesting_level),
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
@ -3065,7 +3068,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
expression_stack_ = new Object*[expression_count_];
// Get the source position using the unoptimized code.
Address pc = reinterpret_cast<Address>(output_frame->GetPc());
Code* code = Code::cast(deoptimizer->isolate()->heap()->FindCodeObject(pc));
Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
source_position_ = code->SourcePosition(pc);
for (int i = 0; i < expression_count_; i++) {

2
deps/v8/src/disassembler.cc

@ -360,6 +360,8 @@ void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
return 0;
}
void Disassembler::Decode(FILE* f, Code* code) {}
#endif // ENABLE_DISASSEMBLER

1
deps/v8/src/elements-kind.cc

@ -83,6 +83,7 @@ ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
return fast_elements_kind_sequence.Get()[sequence_number];
}
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {

1
deps/v8/src/execution.cc

@ -865,6 +865,7 @@ Object* Execution::DebugBreakHelper() {
return isolate->heap()->undefined_value();
}
void Execution::ProcessDebugMessages(bool debug_command_only) {
Isolate* isolate = Isolate::Current();
// Clear the debug command request flag.

2
deps/v8/src/extensions/i18n/break-iterator.cc

@ -82,6 +82,7 @@ void BreakIterator::DeleteBreakIterator(v8::Isolate* isolate,
object->Dispose(isolate);
}
// Throws a JavaScript exception.
static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
// Returns undefined, and schedules an exception to be thrown.
@ -90,6 +91,7 @@ static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
"that is not a BreakIterator.")));
}
// Deletes the old value and sets the adopted text in corresponding
// JavaScript object.
icu::UnicodeString* ResetAdoptedText(

3
deps/v8/src/extensions/i18n/collator.cc

@ -76,6 +76,7 @@ void Collator::DeleteCollator(v8::Isolate* isolate,
object->Dispose(isolate);
}
// Throws a JavaScript exception.
static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
// Returns undefined, and schedules an exception to be thrown.
@ -84,11 +85,13 @@ static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
"that is not a Collator.")));
}
// When there's an ICU error, throw a JavaScript error with |message|.
static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
}
// static
void Collator::JSInternalCompare(
const v8::FunctionCallbackInfo<v8::Value>& args) {

1
deps/v8/src/extensions/i18n/i18n-extension.cc

@ -108,6 +108,7 @@ v8::Handle<v8::FunctionTemplate> Extension::GetNativeFunction(
return v8::Handle<v8::FunctionTemplate>();
}
void Extension::Register() {
static Extension i18n_extension;
static v8::DeclareExtension extension_declaration(&i18n_extension);

7
deps/v8/src/extensions/i18n/i18n-utils.cc

@ -42,6 +42,7 @@ void Utils::StrNCopy(char* dest, int length, const char* src) {
dest[length - 1] = '\0';
}
// static
bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
icu::UnicodeString* output) {
@ -54,6 +55,7 @@ bool Utils::V8StringToUnicodeString(const v8::Handle<v8::Value>& input,
return true;
}
// static
bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
@ -74,6 +76,7 @@ bool Utils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
return false;
}
// static
bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
@ -95,6 +98,7 @@ bool Utils::ExtractIntegerSetting(const v8::Handle<v8::Object>& settings,
return false;
}
// static
bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
const char* setting,
@ -116,6 +120,7 @@ bool Utils::ExtractBooleanSetting(const v8::Handle<v8::Object>& settings,
return false;
}
// static
void Utils::AsciiToUChar(const char* source,
int32_t source_length,
@ -135,6 +140,7 @@ void Utils::AsciiToUChar(const char* source,
target[length - 1] = 0x0u;
}
// static
// Chrome Linux doesn't like static initializers in class, so we create
// template on demand.
@ -153,6 +159,7 @@ v8::Local<v8::ObjectTemplate> Utils::GetTemplate(v8::Isolate* isolate) {
return v8::Local<v8::ObjectTemplate>::New(isolate, icu_template);
}
// static
// Chrome Linux doesn't like static initializers in class, so we create
// template on demand. This one has 2 internal fields.

3
deps/v8/src/extensions/i18n/locale.cc

@ -82,6 +82,7 @@ void JSCanonicalizeLanguageTag(
args.GetReturnValue().Set(v8::String::New(result));
}
void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Expect service name which is a string.
if (args.Length() != 1 || !args[0]->IsString()) {
@ -131,6 +132,7 @@ void JSAvailableLocalesOf(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(locales);
}
void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
icu::Locale default_locale;
@ -147,6 +149,7 @@ void JSGetDefaultICULocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(v8::String::New("und"));
}
void JSGetLanguageTagVariants(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch;

4
deps/v8/src/extensions/i18n/number-format.cc

@ -148,10 +148,10 @@ void NumberFormat::JSInternalParse(
args.GetReturnValue().Set(result.getDouble());
return;
case icu::Formattable::kLong:
args.GetReturnValue().Set(v8::Number::New(result.getLong()));
args.GetReturnValue().Set(result.getLong());
return;
case icu::Formattable::kInt64:
args.GetReturnValue().Set(v8::Number::New(result.getInt64()));
args.GetReturnValue().Set(static_cast<double>(result.getInt64()));
return;
default:
return;

13
deps/v8/src/factory.cc

@ -178,6 +178,7 @@ Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
String);
}
// Internalized strings are created in the old generation (data space).
Handle<String> Factory::InternalizeString(Handle<String> string) {
CALL_HEAP_FUNCTION(isolate(),
@ -185,6 +186,7 @@ Handle<String> Factory::InternalizeString(Handle<String> string) {
String);
}
Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->InternalizeOneByteString(string),
@ -517,6 +519,14 @@ Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
}
Handle<AllocationSite> Factory::NewAllocationSite() {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateAllocationSite(),
AllocationSite);
}
Handle<Map> Factory::NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind) {
@ -925,7 +935,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
initial_map->set_constructor(*function);
}
SetPrototypeProperty(function, prototype);
JSFunction::SetPrototype(function, prototype);
return function;
}
@ -1235,6 +1245,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
JSMessageObject);
}
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateSharedFunctionInfo(*name),

78
deps/v8/src/factory.h

@ -243,6 +243,8 @@ class Factory {
Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
Handle<AllocationSite> NewAllocationSite();
Handle<Map> NewMap(
InstanceType type,
int instance_size,
@ -564,6 +566,82 @@ Handle<Object> Factory::NewNumberFromSize(size_t value,
}
// Used to "safely" transition from pointer-based runtime code to Handle-based
// runtime code. When a GC happens during the called Handle-based code, a
// failure object is returned to the pointer-based code to cause it abort and
// re-trigger a gc of it's own. Since this double-gc will cause the Handle-based
// code to be called twice, it must be idempotent.
class IdempotentPointerToHandleCodeTrampoline {
public:
explicit IdempotentPointerToHandleCodeTrampoline(Isolate* isolate)
: isolate_(isolate) {}
template<typename R>
MUST_USE_RESULT MaybeObject* Call(R (*function)()) {
int collections = isolate_->heap()->gc_count();
(*function)();
return (collections == isolate_->heap()->gc_count())
? isolate_->heap()->true_value()
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
template<typename R>
MUST_USE_RESULT MaybeObject* CallWithReturnValue(R (*function)()) {
int collections = isolate_->heap()->gc_count();
Object* result = (*function)();
return (collections == isolate_->heap()->gc_count())
? result
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
template<typename R, typename P1>
MUST_USE_RESULT MaybeObject* Call(R (*function)(P1), P1 p1) {
int collections = isolate_->heap()->gc_count();
(*function)(p1);
return (collections == isolate_->heap()->gc_count())
? isolate_->heap()->true_value()
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
template<typename R, typename P1>
MUST_USE_RESULT MaybeObject* CallWithReturnValue(
R (*function)(P1),
P1 p1) {
int collections = isolate_->heap()->gc_count();
Object* result = (*function)(p1);
return (collections == isolate_->heap()->gc_count())
? result
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
template<typename R, typename P1, typename P2>
MUST_USE_RESULT MaybeObject* Call(
R (*function)(P1, P2),
P1 p1,
P2 p2) {
int collections = isolate_->heap()->gc_count();
(*function)(p1, p2);
return (collections == isolate_->heap()->gc_count())
? isolate_->heap()->true_value()
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
template<typename R, typename P1, typename P2>
MUST_USE_RESULT MaybeObject* CallWithReturnValue(
R (*function)(P1, P2),
P1 p1,
P2 p2) {
int collections = isolate_->heap()->gc_count();
Object* result = (*function)(p1, p2);
return (collections == isolate_->heap()->gc_count())
? result
: reinterpret_cast<MaybeObject*>(Failure::RetryAfterGC());
}
private:
Isolate* isolate_;
};
} } // namespace v8::internal

18
deps/v8/src/flag-definitions.h

@ -171,6 +171,8 @@ DEFINE_bool(harmony_array_buffer, false,
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony_numeric_literals, false,
"enable harmony numeric literals (0o77, 0b11)")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@ -180,6 +182,7 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony, harmony_numeric_literals)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// TODO[dslomov] add harmony => harmony_typed_arrays
@ -187,7 +190,7 @@ DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
DEFINE_bool(compiled_transitions, true, "use optimizing compiler to "
"generate array elements transition stubs")
DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
"generate keyed store stubs")
@ -195,6 +198,9 @@ DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
// TODO(hpayer): We will remove this flag as soon as we have pretenuring
// support for specific allocation sites.
DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
@ -209,17 +215,19 @@ DEFINE_bool(string_slices, true, "use string slices")
// Flags for Crankshaft.
DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "optimization filter")
DEFINE_string(hydrogen_filter, "*", "optimization filter")
DEFINE_bool(use_range, true, "use hydrogen range analysis")
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
DEFINE_bool(use_inlining, true, "use function inlining")
DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis")
DEFINE_bool(use_allocation_folding, true, "use allocation folding")
DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
DEFINE_int(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
DEFINE_int(max_inlined_nodes, 196,
"maximum number of AST nodes considered for a single inlining")
DEFINE_int(max_inlined_nodes_cumulative, 196,
DEFINE_int(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
@ -236,6 +244,7 @@ DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
DEFINE_bool(trace_allocation_folding, false, "trace allocation folding")
DEFINE_bool(trace_track_allocation_sites, false,
"trace the tracking of allocation sites")
DEFINE_bool(trace_migration, false, "trace object migration")
@ -248,6 +257,7 @@ DEFINE_int(deopt_every_n_times,
DEFINE_int(deopt_every_n_garbage_collections,
0,
"deoptimize every n garbage collections")
DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
@ -348,6 +358,8 @@ DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
"enable use of VFP3 instructions if available")
DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_neon, true,
"enable use of NEON instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
DEFINE_bool(enable_movw_movt, false,

6
deps/v8/src/frames-inl.h

@ -274,10 +274,8 @@ inline bool JavaScriptFrame::has_adapted_arguments() const {
}
inline Object* JavaScriptFrame::function() const {
Object* result = function_slot_object();
ASSERT(result->IsJSFunction());
return result;
inline JSFunction* JavaScriptFrame::function() const {
return JSFunction::cast(function_slot_object());
}

44
deps/v8/src/frames.cc

@ -202,9 +202,10 @@ void StackTraceFrameIterator::Advance() {
}
}
bool StackTraceFrameIterator::IsValidFrame() {
if (!frame()->function()->IsJSFunction()) return false;
Object* script = JSFunction::cast(frame()->function())->shared()->script();
Object* script = frame()->function()->shared()->script();
// Don't show functions from native scripts to user.
return (script->IsScript() &&
Script::TYPE_NATIVE != Script::cast(script)->type()->value());
@ -672,7 +673,7 @@ void StubFrame::Iterate(ObjectVisitor* v) const {
Code* StubFrame::unchecked_code() const {
return static_cast<Code*>(isolate()->heap()->FindCodeObject(pc()));
return static_cast<Code*>(isolate()->FindCodeObject(pc()));
}
@ -723,8 +724,7 @@ int JavaScriptFrame::GetArgumentsLength() const {
Code* JavaScriptFrame::unchecked_code() const {
JSFunction* function = JSFunction::cast(this->function());
return function->code();
return function()->code();
}
@ -732,8 +732,7 @@ int JavaScriptFrame::GetNumberOfIncomingArguments() const {
ASSERT(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
JSFunction* function = JSFunction::cast(this->function());
return function->shared()->formal_parameter_count();
return function()->shared()->formal_parameter_count();
}
@ -744,7 +743,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
functions->Add(JSFunction::cast(function()));
functions->Add(function());
}
@ -753,7 +752,7 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
Code* code_pointer = LookupCode();
int offset = static_cast<int>(pc() - code_pointer->address());
FrameSummary summary(receiver(),
JSFunction::cast(function()),
function(),
code_pointer,
offset,
IsConstructor());
@ -774,9 +773,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
Object* maybe_fun = frame->function();
if (maybe_fun->IsJSFunction()) {
JSFunction* fun = JSFunction::cast(maybe_fun);
JSFunction* fun = frame->function();
fun->PrintName();
Code* js_code = frame->unchecked_code();
Address pc = frame->pc();
@ -786,7 +783,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
SharedFunctionInfo* shared = fun->shared();
if (print_line_number) {
Code* code = Code::cast(
v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
v8::internal::Isolate::Current()->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
@ -806,9 +803,6 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
PrintF(file, " at <unknown>:<unknown>");
}
}
} else {
PrintF("<unknown>");
}
if (print_args) {
// function arguments
@ -912,7 +906,7 @@ void FrameSummary::Print() {
JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
int literal_id) {
if (literal_id == Translation::kSelfLiteralId) {
return JSFunction::cast(function());
return function();
}
return JSFunction::cast(literal_array->get(literal_id));
@ -1017,7 +1011,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
int* deopt_index) {
ASSERT(is_optimized());
JSFunction* opt_function = JSFunction::cast(function());
JSFunction* opt_function = function();
Code* code = opt_function->code();
// The code object may have been replaced by lazy deoptimization. Fall
@ -1131,7 +1125,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
int index) const {
HandleScope scope(isolate());
Object* receiver = this->receiver();
Object* function = this->function();
JSFunction* function = this->function();
accumulator->PrintSecurityTokenIfChanged(function);
PrintIndex(accumulator, mode, index);
@ -1145,8 +1139,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// or context slots.
Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
Handle<SharedFunctionInfo> shared(function->shared());
scope_info = Handle<ScopeInfo>(shared->scope_info());
Object* script_obj = shared->script();
if (script_obj->IsScript()) {
@ -1168,7 +1161,6 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add("] ");
}
}
accumulator->Add("(this=%o", receiver);
@ -1257,7 +1249,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// Print details about the function.
if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
SharedFunctionInfo* shared = JSFunction::cast(function)->shared();
SharedFunctionInfo* shared = function->shared();
accumulator->Add("--------- s o u r c e c o d e ---------\n");
shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
accumulator->Add("\n-----------------------------------------\n");
@ -1272,10 +1264,8 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
int index) const {
int actual = ComputeParametersCount();
int expected = -1;
Object* function = this->function();
if (function->IsJSFunction()) {
expected = JSFunction::cast(function)->shared()->formal_parameter_count();
}
JSFunction* function = this->function();
expected = function->shared()->formal_parameter_count();
PrintIndex(accumulator, mode, index);
accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
@ -1568,6 +1558,7 @@ void SetUpJSCallerSavedCodeData() {
ASSERT(i == kNumJSCallerSaved);
}
int JSCallerSavedCode(int n) {
ASSERT(0 <= n && n < kNumJSCallerSaved);
return caller_saved_code_data.reg_code[n];
@ -1600,6 +1591,7 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
return NULL;
}
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
ZoneList<StackFrame*> list(10, zone);
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {

2
deps/v8/src/frames.h

@ -543,7 +543,7 @@ class JavaScriptFrame: public StandardFrame {
virtual Type type() const { return JAVA_SCRIPT; }
// Accessors.
inline Object* function() const;
inline JSFunction* function() const;
inline Object* receiver() const;
inline void set_receiver(Object* value);

7
deps/v8/src/full-codegen.cc

@ -76,12 +76,15 @@ void BreakableStatementChecker::VisitExportDeclaration(
void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
}
void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
}
void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
}
void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
}
@ -376,7 +379,7 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
for (unsigned i = 0; i < length; ++i) {
__ dd(back_edges_[i].id.ToInt());
__ dd(back_edges_[i].pc);
__ db(back_edges_[i].loop_depth);
__ dd(back_edges_[i].loop_depth);
}
return offset;
}
@ -1602,7 +1605,7 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
return true;
}
if (expr->IsLiteralCompareUndefined(&sub_expr)) {
if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
return true;
}

4
deps/v8/src/full-codegen.h

@ -136,7 +136,7 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
static const int kBackEdgeEntrySize = 2 * kIntSize + kOneByteSize;
static const int kBackEdgeEntrySize = 3 * kIntSize;
private:
class Breakable;
@ -648,7 +648,7 @@ class FullCodeGenerator: public AstVisitor {
struct BackEdgeEntry {
BailoutId id;
unsigned pc;
uint8_t loop_depth;
uint32_t loop_depth;
};
struct TypeFeedbackCellEntry {
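The back-edge table entry thus grows from 9 to 12 bytes: loop_depth is widened from uint8_t to uint32_t, emitted with dd instead of db, and read back as a uint32 in deoptimizer.cc above. A standalone illustration of the new layout (not V8 code; field names follow the emitter above):

#include <cstdint>
#include <cstdio>

// Mirrors the widened entry: three 32-bit words per back edge, matching
// kBackEdgeEntrySize = 3 * kIntSize.
struct BackEdgeEntryLayout {
  uint32_t ast_id;      // back_edges_[i].id.ToInt()
  uint32_t pc_offset;   // back_edges_[i].pc
  uint32_t loop_depth;  // previously uint8_t
};

int main() {
  std::printf("entry size = %u bytes\n",
              static_cast<unsigned>(sizeof(BackEdgeEntryLayout)));  // 12
  return 0;
}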

1
deps/v8/src/gdb-jit.cc

@ -2015,6 +2015,7 @@ void GDBJITInterface::AddCode(Handle<Name> name,
}
}
static void AddUnwindInfo(CodeDescription* desc) {
#if V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {

11
deps/v8/src/global-handles.cc

@ -634,6 +634,11 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if (!node->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
// the next_gc_likely_to_collect_more.
continue;
}
// Skip dependent handles. Their weak callbacks might expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
@ -656,6 +661,11 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (!it.node()->IsRetainer()) {
// Free nodes do not have weak callbacks. Do not use them to compute
// the next_gc_likely_to_collect_more.
continue;
}
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
@ -799,6 +809,7 @@ void GlobalHandles::PrintStats() {
PrintF(" # total = %d\n", total);
}
void GlobalHandles::Print() {
PrintF("Global handles:\n");
for (NodeIterator it(this); !it.done(); it.Advance()) {

Some files were not shown because too many files changed in this diff
