
Roll V8 back to 3.10.8.13

v0.9.1-release
isaacs committed 13 years ago
commit 46b09e4190
100 changed files:

  1. deps/v8/ChangeLog (107)
  2. deps/v8/DEPS (27)
  3. deps/v8/Makefile (7)
  4. deps/v8/SConstruct (17)
  5. deps/v8/build/common.gypi (131)
  6. deps/v8/build/gyp_v8 (36)
  7. deps/v8/build/standalone.gypi (12)
  8. deps/v8/include/v8.h (8)
  9. deps/v8/src/SConscript (1)
  10. deps/v8/src/api.cc (37)
  11. deps/v8/src/api.h (4)
  12. deps/v8/src/arm/builtins-arm.cc (9)
  13. deps/v8/src/arm/code-stubs-arm.cc (35)
  14. deps/v8/src/arm/codegen-arm.cc (4)
  15. deps/v8/src/arm/debug-arm.cc (4)
  16. deps/v8/src/arm/full-codegen-arm.cc (7)
  17. deps/v8/src/arm/ic-arm.cc (53)
  18. deps/v8/src/arm/lithium-arm.cc (5)
  19. deps/v8/src/arm/lithium-arm.h (9)
  20. deps/v8/src/arm/lithium-codegen-arm.cc (134)
  21. deps/v8/src/arm/macro-assembler-arm.cc (80)
  22. deps/v8/src/arm/macro-assembler-arm.h (5)
  23. deps/v8/src/arm/regexp-macro-assembler-arm.cc (144)
  24. deps/v8/src/arm/regexp-macro-assembler-arm.h (13)
  25. deps/v8/src/arm/simulator-arm.h (12)
  26. deps/v8/src/arm/stub-cache-arm.cc (50)
  27. deps/v8/src/bootstrapper.cc (20)
  28. deps/v8/src/builtins.cc (85)
  29. deps/v8/src/code-stubs.cc (32)
  30. deps/v8/src/code-stubs.h (1)
  31. deps/v8/src/codegen.h (6)
  32. deps/v8/src/contexts.h (20)
  33. deps/v8/src/d8.cc (53)
  34. deps/v8/src/d8.h (2)
  35. deps/v8/src/debug-agent.cc (32)
  36. deps/v8/src/debug.cc (42)
  37. deps/v8/src/debug.h (50)
  38. deps/v8/src/elements-kind.cc (134)
  39. deps/v8/src/elements-kind.h (210)
  40. deps/v8/src/elements.cc (400)
  41. deps/v8/src/elements.h (22)
  42. deps/v8/src/factory.cc (5)
  43. deps/v8/src/factory.h (13)
  44. deps/v8/src/flag-definitions.h (3)
  45. deps/v8/src/frames.h (3)
  46. deps/v8/src/func-name-inferrer.h (2)
  47. deps/v8/src/globals.h (3)
  48. deps/v8/src/heap-inl.h (20)
  49. deps/v8/src/heap.cc (70)
  50. deps/v8/src/heap.h (20)
  51. deps/v8/src/hydrogen-instructions.cc (33)
  52. deps/v8/src/hydrogen-instructions.h (164)
  53. deps/v8/src/hydrogen.cc (313)
  54. deps/v8/src/hydrogen.h (3)
  55. deps/v8/src/ia32/assembler-ia32.h (3)
  56. deps/v8/src/ia32/builtins-ia32.cc (9)
  57. deps/v8/src/ia32/code-stubs-ia32.cc (38)
  58. deps/v8/src/ia32/codegen-ia32.cc (4)
  59. deps/v8/src/ia32/debug-ia32.cc (31)
  60. deps/v8/src/ia32/full-codegen-ia32.cc (16)
  61. deps/v8/src/ia32/ic-ia32.cc (38)
  62. deps/v8/src/ia32/lithium-codegen-ia32.cc (163)
  63. deps/v8/src/ia32/lithium-codegen-ia32.h (3)
  64. deps/v8/src/ia32/lithium-ia32.cc (8)
  65. deps/v8/src/ia32/lithium-ia32.h (12)
  66. deps/v8/src/ia32/macro-assembler-ia32.cc (80)
  67. deps/v8/src/ia32/macro-assembler-ia32.h (5)
  68. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (148)
  69. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (13)
  70. deps/v8/src/ia32/simulator-ia32.h (8)
  71. deps/v8/src/ia32/stub-cache-ia32.cc (27)
  72. deps/v8/src/ic.cc (85)
  73. deps/v8/src/ic.h (20)
  74. deps/v8/src/incremental-marking-inl.h (26)
  75. deps/v8/src/incremental-marking.cc (43)
  76. deps/v8/src/incremental-marking.h (15)
  77. deps/v8/src/isolate.h (2)
  78. deps/v8/src/jsregexp.cc (88)
  79. deps/v8/src/jsregexp.h (100)
  80. deps/v8/src/list-inl.h (8)
  81. deps/v8/src/list.h (3)
  82. deps/v8/src/lithium.cc (7)
  83. deps/v8/src/liveedit.cc (60)
  84. deps/v8/src/mark-compact-inl.h (28)
  85. deps/v8/src/mark-compact.cc (224)
  86. deps/v8/src/mark-compact.h (64)
  87. deps/v8/src/messages.js (230)
  88. deps/v8/src/mips/builtins-mips.cc (9)
  89. deps/v8/src/mips/code-stubs-mips.cc (38)
  90. deps/v8/src/mips/codegen-mips.cc (4)
  91. deps/v8/src/mips/debug-mips.cc (4)
  92. deps/v8/src/mips/full-codegen-mips.cc (8)
  93. deps/v8/src/mips/ic-mips.cc (56)
  94. deps/v8/src/mips/lithium-codegen-mips.cc (134)
  95. deps/v8/src/mips/lithium-mips.cc (5)
  96. deps/v8/src/mips/lithium-mips.h (6)
  97. deps/v8/src/mips/macro-assembler-mips.cc (81)
  98. deps/v8/src/mips/macro-assembler-mips.h (5)
  99. deps/v8/src/mips/regexp-macro-assembler-mips.cc (136)
  100. deps/v8/src/mips/regexp-macro-assembler-mips.h (11)

deps/v8/ChangeLog (107)

@@ -1,110 +1,3 @@
-2012-05-29: Version 3.11.7
-
-        Get better function names in stack traces.
-
-        Performance and stability improvements on all platforms.
-
-
-2012-05-24: Version 3.11.6
-
-        Fixed RegExp.prototype.toString for incompatible receivers
-        (issue 1981).
-
-        Performance and stability improvements on all platforms.
-
-
-2012-05-23: Version 3.11.5
-
-        Performance and stability improvements on all platforms.
-
-
-2012-05-22: Version 3.11.4
-
-        Some cleanup to common.gypi. This fixes some host/target combinations
-        that weren't working in the Make build on Mac.
-
-        Handle EINTR in socket functions and continue incomplete sends.
-        (issue 2098)
-
-        Fixed python deprecations. (issue 1391)
-
-        Made socket send and receive more robust and return 0 on failure.
-        (Chromium issue 15719)
-
-        Fixed GCC 4.7 (C++11) compilation. (issue 2136)
-
-        Set '-m32' option for host and target platforms
-
-        Performance and stability improvements on all platforms.
-
-
-2012-05-18: Version 3.11.3
-
-        Disable optimization for functions that have scopes that cannot be
-        reconstructed from the context chain. (issue 2071)
-
-        Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
-
-        Correctly check for native error objects. (Chromium issue 2138)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-05-16: Version 3.11.2
-
-        Revert r11496. (Chromium issue 128146)
-
-        Implement map collection for incremental marking. (issue 1465)
-
-        Add toString method to CallSite (which describes a frame of the
-        stack trace).
-
-
-2012-05-15: Version 3.11.1
-
-        Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
-
-        Fix freebsd build. (V8 issue 2126)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-05-11: Version 3.11.0
-
-        Fixed compose-discard crasher from r11524 (issue 2123).
-
-        Activated new global semantics by default. Global variables can
-        now shadow properties of the global object (ES5.1 erratum).
-
-        Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
-        transitioning (Chromium issue 117409).
-
-        Made Error.prototype.name writable again, as required by the spec and
-        the web (Chromium issue 69187).
-
-        Implemented map collection with incremental marking (issue 1465).
-
-        Regexp: Fixed overflow in min-match-length calculation
-        (Chromium issue 126412).
-
-        MIPS: Fixed illegal instruction use on Loongson in code for
-        Math.random() (issue 2115).
-
-        Fixed crash bug in VisitChoice (Chromium issue 126272).
-
-        Fixed unsigned-Smi check in MappedArgumentsLookup
-        (Chromium issue 126414).
-
-        Fixed LiveEdit for function with no locals (issue 825).
-
-        Fixed register clobbering in LoadIC for interceptors
-        (Chromium issue 125988).
-
-        Implemented clearing of CompareICs (issue 2102).
-
-        Performance and stability improvements on all platforms.
-
-
 2012-05-03: Version 3.10.8
 
         Enabled MIPS cross-compilation.

deps/v8/DEPS (27)

@@ -1,27 +0,0 @@
-# Note: The buildbots evaluate this file with CWD set to the parent
-# directory and assume that the root of the checkout is in ./v8/, so
-# all paths in here must match this assumption.
-
-deps = {
-  # Remember to keep the revision in sync with the Makefile.
-  "v8/build/gyp":
-    "http://gyp.googlecode.com/svn/trunk@1282",
-}
-
-deps_os = {
-  "win": {
-    "v8/third_party/cygwin":
-      "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
-
-    "v8/third_party/python_26":
-      "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
-  }
-}
-
-hooks = [
-  {
-    # A change to a .gyp, .gypi, or to GYP itself should run the generator.
-    "pattern": ".",
-    "action": ["python", "v8/build/gyp_v8"],
-  },
-]

deps/v8/Makefile (7)

@@ -137,12 +137,6 @@ ENVFILE = $(OUTDIR)/environment
 # Target definitions. "all" is the default.
 all: $(MODES)
 
-# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
-# having been created before.
-buildbot:
-	$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
-	        builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
-
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -228,7 +222,6 @@ $(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
 	must-set-ANDROID_NDK_ROOT
 	GYP_GENERATORS=make \
 	CC="${ANDROID_TOOL_PREFIX}-gcc" \
-	CXX="${ANDROID_TOOL_PREFIX}-g++" \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
 	-S.android $(GYPFLAGS)

deps/v8/SConstruct (17)

@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
   'os:linux': {
     'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
     'library:shared': {
-      'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
+      'CPPDEFINES': ['V8_SHARED'],
       'LIBS': ['pthread']
     }
   },
   'os:macos': {
     'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
     'library:shared': {
-      'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED']
+      'CPPDEFINES': ['V8_SHARED']
     }
   },
   'os:freebsd': {
@@ -1601,17 +1601,4 @@ except:
     pass
 
-def WarnAboutDeprecation():
-  print """
-#######################################################
-#  WARNING: Building V8 with SCons is deprecated and  #
-#  will not work much longer. Please switch to using  #
-#  the GYP-based build now. Instructions are at       #
-#  http://code.google.com/p/v8/wiki/BuildingWithGYP.  #
-#######################################################
-"""
-
-WarnAboutDeprecation()
-
-import atexit
-atexit.register(WarnAboutDeprecation)
-
 Build()

deps/v8/build/common.gypi (131)

@@ -110,6 +110,13 @@
     ['v8_enable_gdbjit==1', {
       'defines': ['ENABLE_GDB_JIT_INTERFACE',],
     }],
+    ['OS!="mac"', {
+      # TODO(mark): The OS!="mac" conditional is temporary. It can be
+      # removed once the Mac Chromium build stops setting target_arch to
+      # ia32 and instead sets it to mac. Other checks in this file for
+      # OS=="mac" can be removed at that time as well. This can be cleaned
+      # up once http://crbug.com/44205 is fixed.
+      'conditions': [
     ['v8_target_arch=="arm"', {
       'defines': [
         'V8_TARGET_ARCH_ARM',
@@ -145,13 +152,23 @@
             'USE_EABI_HARDFLOAT=0',
           ],
         }],
+        # The ARM assembler assumes the host is 32 bits,
+        # so force building 32-bit host tools.
+        ['host_arch=="x64" or OS=="android"', {
+          'target_conditions': [
+            ['_toolset=="host"', {
+              'cflags': ['-m32'],
+              'ldflags': ['-m32'],
+            }],
+          ],
+        }],
       ],
-    }],  # v8_target_arch=="arm"
+    }],
     ['v8_target_arch=="ia32"', {
       'defines': [
         'V8_TARGET_ARCH_IA32',
       ],
-    }],  # v8_target_arch=="ia32"
+    }],
     ['v8_target_arch=="mips"', {
       'defines': [
         'V8_TARGET_ARCH_MIPS',
@@ -206,21 +223,38 @@
         ['mips_arch_variant=="loongson"', {
           'defines': ['_MIPS_ARCH_LOONGSON',],
        }],
+        # The MIPS assembler assumes the host is 32 bits,
+        # so force building 32-bit host tools.
+        ['host_arch=="x64"', {
+          'target_conditions': [
+            ['_toolset=="host"', {
+              'cflags': ['-m32'],
+              'ldflags': ['-m32'],
+            }],
+          ],
+        }],
       ],
-    }],  # v8_target_arch=="mips"
+    }],
     ['v8_target_arch=="x64"', {
       'defines': [
         'V8_TARGET_ARCH_X64',
       ],
-      'xcode_settings': {
-        'ARCHS': [ 'x86_64' ],
-      },
-      'msvs_settings': {
-        'VCLinkerTool': {
-          'StackReserveSize': '2097152',
-        },
-      },
-    }],  # v8_target_arch=="x64"
+    }],
+      ],
+    }, {  # Section for OS=="mac".
+      'conditions': [
+        ['target_arch=="ia32"', {
+          'xcode_settings': {
+            'ARCHS': ['i386'],
+          }
+        }],
+        ['target_arch=="x64"', {
+          'xcode_settings': {
+            'ARCHS': ['x86_64'],
+          }
+        }],
+      ],
+    }],
     ['v8_use_liveobjectlist=="true"', {
      'defines': [
        'ENABLE_DEBUGGER_SUPPORT',
@@ -238,10 +272,6 @@
      'defines': [
        'WIN32',
      ],
-      'msvs_configuration_attributes': {
-        'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
-        'CharacterSet': '1',
-      },
    }],
    ['OS=="win" and v8_enable_prof==1', {
      'msvs_settings': {
@@ -253,48 +283,21 @@
    ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
      or OS=="netbsd"', {
      'conditions': [
-        [ 'v8_no_strict_aliasing==1', {
-          'cflags': [ '-fno-strict-aliasing' ],
-        }],
-      ],  # conditions
-    }],
-    ['OS=="solaris"', {
-      'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
-    }],
-    ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
-       or OS=="netbsd" or OS=="mac" or OS=="android") and \
-      (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
-       v8_target_arch=="mips")', {
-      # Check whether the host compiler and target compiler support the
-      # '-m32' option and set it if so.
-      'target_conditions': [
-        ['_toolset=="host"', {
-          'variables': {
-            'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
-          },
-          'cflags': [ '<(m32flag)' ],
-          'ldflags': [ '<(m32flag)' ],
-          'xcode_settings': {
-            'ARCHS': [ 'i386' ],
-          },
-        }],
-        ['_toolset=="target"', {
-          'variables': {
-            'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
-          },
-          'cflags': [ '<(m32flag)' ],
-          'ldflags': [ '<(m32flag)' ],
-          'xcode_settings': {
-            'ARCHS': [ 'i386' ],
-          },
-        }],
-      ],
-    }],
-    ['OS=="freebsd" or OS=="openbsd"', {
-      'cflags': [ '-I/usr/local/include' ],
-    }],
-    ['OS=="netbsd"', {
-      'cflags': [ '-I/usr/pkg/include' ],
-    }],
+        [ 'v8_target_arch!="x64"', {
+          # Pass -m32 to the compiler iff it understands the flag.
+          'variables': {
+            'm32flag': '<!((echo | $(echo ${CXX:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
+          },
+          'cflags': [ '<(m32flag)' ],
+          'ldflags': [ '<(m32flag)' ],
+        }],
+        [ 'v8_no_strict_aliasing==1', {
+          'cflags': [ '-fno-strict-aliasing' ],
+        }],
+      ],  # conditions
+    }],
+    ['OS=="solaris"', {
+      'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
+    }],
   ],  # conditions
   'configurations': {
@@ -319,11 +322,21 @@
        },
        'VCLinkerTool': {
          'LinkIncremental': '2',
+          # For future reference, the stack size needs to be increased
+          # when building for Windows 64-bit, otherwise some test cases
+          # can cause stack overflow.
+          # 'StackReserveSize': '297152',
        },
      },
      'conditions': [
+        ['OS=="freebsd" or OS=="openbsd"', {
+          'cflags': [ '-I/usr/local/include' ],
+        }],
+        ['OS=="netbsd"', {
+          'cflags': [ '-I/usr/pkg/include' ],
+        }],
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-          'cflags': [ '-Wno-unused-parameter',
+          'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                      '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
        }],
      ],
@@ -351,6 +364,12 @@
        }],
      ],
    }],
+    ['OS=="freebsd" or OS=="openbsd"', {
+      'cflags': [ '-I/usr/local/include' ],
+    }],
+    ['OS=="netbsd"', {
+      'cflags': [ '-I/usr/pkg/include' ],
+    }],
    ['OS=="mac"', {
      'xcode_settings': {
        'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
@@ -363,6 +382,11 @@
      },
    }],  # OS=="mac"
    ['OS=="win"', {
+      'msvs_configuration_attributes': {
+        'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+        'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+        'CharacterSet': '1',
+      },
      'msvs_settings': {
        'VCCLCompilerTool': {
          'Optimization': '2',
@@ -383,7 +407,12 @@
        'VCLinkerTool': {
          'LinkIncremental': '1',
          'OptimizeReferences': '2',
+          'OptimizeForWindows98': '1',
          'EnableCOMDATFolding': '2',
+          # For future reference, the stack size needs to be
+          # increased when building for Windows 64-bit, otherwise
+          # some test cases can cause stack overflow.
+          # 'StackReserveSize': '297152',
        },
      },
    }],  # OS=="win"

deps/v8/build/gyp_v8 (36)

@@ -38,11 +38,6 @@ import sys
 script_dir = os.path.dirname(__file__)
 v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
 
-if __name__ == '__main__':
-  os.chdir(v8_root)
-  script_dir = os.path.dirname(__file__)
-  v8_root = '.'
-
 sys.path.insert(0, os.path.join(v8_root, 'tools'))
 import utils
 
@@ -98,7 +93,7 @@ def additional_include_files(args=[]):
       result.append(path)
 
   # Always include standalone.gypi
-  AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
+  AddInclude(os.path.join(script_dir, 'standalone.gypi'))
 
   # Optionally add supplemental .gypi files if present.
   supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -140,10 +135,7 @@ if __name__ == '__main__':
       # path separators even on Windows due to the use of shlex.split().
       args.extend(shlex.split(gyp_file))
   else:
-    # Note that this must not start with "./" or things break.
-    # So we rely on having done os.chdir(v8_root) above and use the
-    # relative path.
-    args.append(os.path.join('build', 'all.gyp'))
+    args.append(os.path.join(script_dir, 'all.gyp'))
 
   args.extend(['-I' + i for i in additional_include_files(args)])
 
@@ -164,6 +156,28 @@ if __name__ == '__main__':
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
+  target_arch = None
+  for p in gyp_args:
+    if p.find('-Dtarget_arch=') == 0:
+      target_arch = p
+  if target_arch is None:
+    gyp_args.append('-Dtarget_arch=ia32')
+    if utils.GuessOS() == 'linux':
+      gyp_args.append('-S.ia32')
+  run_gyp(gyp_args)
+
   if utils.GuessOS() == 'linux':
-    gyp_args.append('--generator-output=out')
+    gyp_args = list(args)
+    gyp_args.append('-Dtarget_arch=x64')
+    gyp_args.append('-S.x64')
+    run_gyp(gyp_args)
+
+    gyp_args = list(args)
+    gyp_args.append('-Dv8_target_arch=arm')
+    gyp_args.append('-S.arm')
+    run_gyp(gyp_args)
+
+    gyp_args = list(args)
+    gyp_args.append('-Dv8_target_arch=mips')
+    gyp_args.append('-S.mips')
   run_gyp(gyp_args)

deps/v8/build/standalone.gypi (12)

@@ -37,9 +37,8 @@
   'variables': {
     'variables': {
       'conditions': [
-        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
-           OS=="netbsd" or OS=="mac"', {
-          # This handles the Unix platforms we generally deal with.
+        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+          # This handles the Linux platforms we generally deal with.
           # Anything else gets passed through, which probably won't work
           # very well; such hosts should pass an explicit target_arch
           # to gyp.
@@ -47,8 +46,7 @@
             '<!(uname -m | sed -e "s/i.86/ia32/;\
               s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
        }, {
-          # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
-          # OS!="netbsd" and OS!="mac"
+          # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
          'host_arch%': 'ia32',
        }],
      ],
@@ -171,9 +169,6 @@
      },
    }],  # OS=="win"
    ['OS=="mac"', {
-      'xcode_settings': {
-        'SYMROOT': '<(DEPTH)/xcodebuild',
-      },
      'target_defaults': {
        'xcode_settings': {
          'ALWAYS_SEARCH_USER_PATHS': 'NO',
@@ -193,7 +188,6 @@
          'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
          'MACOSX_DEPLOYMENT_TARGET': '10.4',  # -mmacosx-version-min=10.4
          'PREBINDING': 'NO',  # No -Wl,-prebind
-          'SYMROOT': '<(DEPTH)/xcodebuild',
          'USE_HEADERMAP': 'NO',
          'OTHER_CFLAGS': [
            '-fno-strict-aliasing',

deps/v8/include/v8.h (8)

@@ -62,13 +62,11 @@
 #else  // _WIN32
 
-// Setup for Linux shared library export.
+// Setup for Linux shared library export. There is no need to distinguish
+// between building or using the V8 shared library, but we should not
+// export symbols when we are building a static library.
 #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
-#ifdef BUILDING_V8_SHARED
 #define V8EXPORT __attribute__ ((visibility("default")))
-#else
-#define V8EXPORT
-#endif
 #else  // defined(__GNUC__) && (__GNUC__ >= 4)
 #define V8EXPORT
 #endif  // defined(__GNUC__) && (__GNUC__ >= 4)

deps/v8/src/SConscript (1)

@@ -68,7 +68,6 @@ SOURCES = {
     diy-fp.cc
     dtoa.cc
     elements.cc
-    elements-kind.cc
     execution.cc
     factory.cc
     flags.cc

deps/v8/src/api.cc (37)

@@ -5040,7 +5040,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  if (!self->HasFastObjectElements()) {
+  if (!self->HasFastElements()) {
     return Local<Object>();
   }
   i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -6045,6 +6045,13 @@ int HeapGraphNode::GetSelfSize() const {
 }
 
 
+int HeapGraphNode::GetRetainedSize() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
+  return ToInternal(this)->retained_size();
+}
+
+
 int HeapGraphNode::GetChildrenCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@@ -6056,7 +6063,29 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
   return reinterpret_cast<const HeapGraphEdge*>(
-      ToInternal(this)->children()[index]);
+      &ToInternal(this)->children()[index]);
+}
+
+
+int HeapGraphNode::GetRetainersCount() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
+  return ToInternal(this)->retainers().length();
+}
+
+
+const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
+  return reinterpret_cast<const HeapGraphEdge*>(
+      ToInternal(this)->retainers()[index]);
+}
+
+
+const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
+  return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
 }
 
 
@@ -6128,7 +6157,7 @@ const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
 int HeapSnapshot::GetNodesCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
-  return ToInternal(this)->entries().length();
+  return ToInternal(this)->entries()->length();
 }
 
 
@@ -6136,7 +6165,7 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
   return reinterpret_cast<const HeapGraphNode*>(
-      &ToInternal(this)->entries().at(index));
+      ToInternal(this)->entries()->at(index));
 }

deps/v8/src/api.h (4)

@@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
 
 
 v8::internal::Object* NeanderObject::get(int offset) {
-  ASSERT(value()->HasFastObjectElements());
+  ASSERT(value()->HasFastElements());
   return v8::internal::FixedArray::cast(value()->elements())->get(offset);
 }
 
 
 void NeanderObject::set(int offset, v8::internal::Object* value) {
-  ASSERT(value_->HasFastObjectElements());
+  ASSERT(value_->HasFastElements());
   v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
 }

deps/v8/src/arm/builtins-arm.cc (9)

@@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
                                  Label* gc_required) {
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -208,8 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
                             bool fill_with_hole,
                             Label* gc_required) {
   // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2,
-                         elements_array_storage, fill_with_hole);
+  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
 
   if (FLAG_debug_code) {  // Assert that array size is not zero.
     __ tst(array_size, array_size);
@@ -441,10 +440,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ b(call_generic_code);
 
   __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
   // r3: JSArray
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          r2,
                                          r9,

deps/v8/src/arm/code-stubs-arm.cc (35)

@@ -4824,32 +4824,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  const int kRegExpExecuteArguments = 9;
+  const int kRegExpExecuteArguments = 8;
   const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
 
   // Stack pointer now points to cell where return address is to be written.
   // Arguments are before that on the stack or in registers.
 
-  // Argument 9 (sp[20]): Pass current isolate address.
+  // Argument 8 (sp[16]): Pass current isolate address.
   __ mov(r0, Operand(ExternalReference::isolate_address()));
-  __ str(r0, MemOperand(sp, 5 * kPointerSize));
+  __ str(r0, MemOperand(sp, 4 * kPointerSize));
 
-  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
+  // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
   __ mov(r0, Operand(1));
-  __ str(r0, MemOperand(sp, 4 * kPointerSize));
+  __ str(r0, MemOperand(sp, 3 * kPointerSize));
 
-  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
+  // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
   __ mov(r0, Operand(address_of_regexp_stack_memory_address));
   __ ldr(r0, MemOperand(r0, 0));
   __ mov(r2, Operand(address_of_regexp_stack_memory_size));
   __ ldr(r2, MemOperand(r2, 0));
   __ add(r0, r0, Operand(r2));
-  __ str(r0, MemOperand(sp, 3 * kPointerSize));
-
-  // Argument 6: Set the number of capture registers to zero to force global
-  // regexps to behave as non-global. This does not affect non-global regexps.
-  __ mov(r0, Operand(0));
   __ str(r0, MemOperand(sp, 2 * kPointerSize));
 
   // Argument 5 (sp[4]): static offsets vector buffer.
@@ -4898,9 +4893,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check the result.
   Label success;
-  __ cmp(r0, Operand(1));
-  // We expect exactly one result since we force the called regexp to behave
-  // as non-global.
+  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
   __ b(eq, &success);
   Label failure;
   __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@@ -7102,8 +7095,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
   { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
-  // and ElementsTransitionGenerator::GenerateSmiToDouble
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
   { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@@ -7366,9 +7359,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   Label fast_elements;
   __ CheckFastElements(r2, r5, &double_elements);
-  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
   __ JumpIfSmi(r0, &smi_element);
-  __ CheckFastSmiElements(r2, r5, &fast_elements);
+  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
 
   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -7380,7 +7373,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ Push(r5, r4);
   __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
 
-  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
   __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7391,8 +7384,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
                  EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ Ret();
 
-  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
-  // and value is Smi.
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
   __ bind(&smi_element);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
   __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));

deps/v8/src/arm/codegen-arm.cc (4)

@@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 // -------------------------------------------------------------------------
 // Code generators
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0    : value
@@ -96,7 +96,7 @@
 }
 
 
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- r0    : value

deps/v8/src/arm/debug-arm.cc (4)

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -125,8 +125,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
       Assembler::kDebugBreakSlotInstructions);
 }
 
-const bool Debug::FramePaddingLayout::kIsSupported = false;
-
 #define __ ACCESS_MASM(masm)

deps/v8/src/arm/full-codegen-arm.cc (7)

@@ -1701,7 +1701,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1722,7 +1722,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
           FLAG_smi_only_arrays);
     FastCloneShallowArrayStub::Mode mode = has_fast_elements
        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1750,7 +1751,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     }
     VisitForAccumulatorValue(subexpr);
 
-    if (IsFastObjectElementsKind(constant_elements_kind)) {
+    if (constant_elements_kind == FAST_ELEMENTS) {
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ ldr(r6, MemOperand(sp));  // Copy of array literal.
       __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));

deps/v8/src/arm/ic-arm.cc (53)

@@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   // Must return the modified receiver in r0.
   if (!FLAG_trace_elements_transitions) {
     Label fail;
-    ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
     __ mov(r0, r2);
     __ Ret();
     __ bind(&fail);
@@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
   __ b(ne, &non_double_value);
 
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
   // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_DOUBLE_ELEMENTS,
                                          receiver_map,
                                          r4,
                                          &slow);
   ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
 
   __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          receiver_map,
                                          r4,
                                          &slow);
   ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+    PatchInlinedSmiCode(address());
   }
 }
 
 
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
   Address cmp_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1729,31 +1729,34 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  // This is patching a conditional "jump if not smi/jump if smi" site.
-  // Enabling by changing from
-  //   cmp rx, rx
-  //   b eq/ne, <target>
-  // to
-  //   tst rx, #kSmiTagMask
-  //   b ne/eq, <target>
-  // and vice-versa to be disabled again.
-  CodePatcher patcher(patch_address, 2);
-  Register reg = Assembler::GetRn(instr_at_patch);
-  if (check == ENABLE_INLINED_SMI_CHECK) {
-    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-              Assembler::GetRm(instr_at_patch).code());
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
-  } else {
-    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
-    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
-    patcher.masm()->cmp(reg, reg);
-  }
-  ASSERT(Assembler::IsBranch(branch_instr));
-  if (Assembler::GetCondition(branch_instr) == eq) {
+  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+            Assembler::GetRm(instr_at_patch).code());
+  ASSERT(Assembler::IsBranch(branch_instr));
+  if (Assembler::GetCondition(branch_instr) == eq) {
+    // This is patching a "jump if not smi" site to be active.
+    // Changing
+    //   cmp rx, rx
+    //   b eq, <target>
+    // to
+    //   tst rx, #kSmiTagMask
+    //   b ne, <target>
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Assembler::GetRn(instr_at_patch);
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(ne);
   } else {
     ASSERT(Assembler::GetCondition(branch_instr) == ne);
+    // This is patching a "jump if smi" site to be active.
+    // Changing
+    //   cmp rx, rx
+    //   b ne, <target>
+    // to
+    //   tst rx, #kSmiTagMask
+    //   b eq, <target>
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Assembler::GetRn(instr_at_patch);
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(eq);
   }
 }

deps/v8/src/arm/lithium-arm.cc (5)

@@ -2082,9 +2082,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  ElementsKind from_kind = instr->original_map()->elements_kind();
-  ElementsKind to_kind = instr->transitioned_map()->elements_kind();
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
     LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =

deps/v8/src/arm/lithium-arm.h (9)

@@ -1236,7 +1236,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1253,13 +1252,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                    LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1273,7 +1272,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1742,7 +1740,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1765,7 +1762,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
@@ -1810,7 +1806,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

deps/v8/src/arm/lithium-codegen-arm.cc (134)

@@ -2587,38 +2587,42 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
-
   int map_count = instr->hydrogen()->types()->length();
-  bool need_generic = instr->hydrogen()->need_generic();
-
-  if (map_count == 0 && !need_generic) {
-    DeoptimizeIf(al, instr->environment());
-    return;
-  }
-
   Handle<String> name = instr->hydrogen()->name();
-  Label done;
-  __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-  for (int i = 0; i < map_count; ++i) {
-    bool last = (i == map_count - 1);
-    Handle<Map> map = instr->hydrogen()->types()->at(i);
-    __ cmp(scratch, Operand(map));
-    if (last && !need_generic) {
-      DeoptimizeIf(ne, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    } else {
-      Label next;
-      __ b(ne, &next);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ b(&done);
-      __ bind(&next);
-    }
-  }
-  if (need_generic) {
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
     __ mov(r2, Operand(name));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
+      Label next;
+      __ cmp(scratch, Operand(map));
+      __ b(ne, &next);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ b(&done);
+      __ bind(&next);
+    }
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    __ cmp(scratch, Operand(map));
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ b(ne, &generic);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ b(&done);
+      __ bind(&generic);
+      __ mov(r2, Operand(name));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(ne, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    }
     __ bind(&done);
+  }
 }
@@ -2696,10 +2700,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
     __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
     __ ubfx(scratch, scratch, Map::kElementsKindShift,
             Map::kElementsKindBitCount);
-    __ cmp(scratch, Operand(GetInitialFastElementsKind()));
-    __ b(lt, &fail);
-    __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
-    __ b(le, &done);
+    __ cmp(scratch, Operand(FAST_ELEMENTS));
+    __ b(eq, &done);
     __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
     __ b(lt, &fail);
     __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@@ -2746,9 +2748,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
 
   // Load the result.
   __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
-  uint32_t offset = FixedArray::kHeaderSize +
-                    (instr->additional_index() << kPointerSizeLog2);
-  __ ldr(result, FieldMemOperand(scratch, offset));
+  __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2780,21 +2780,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
   }
 
   Operand operand = key_is_constant
-      ? Operand(((constant_key + instr->additional_index()) << shift_size) +
+      ? Operand(constant_key * (1 << shift_size) +
                 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
      : Operand(key, LSL, shift_size);
   __ add(elements, elements, operand);
   if (!key_is_constant) {
     __ add(elements, elements,
-           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-                   (instr->additional_index() << shift_size)));
+           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
-  }
+  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  DeoptimizeIf(eq, instr->environment());
 
   __ vldr(result, elements, 0);
 }
@@ -2816,33 +2813,26 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
-  int additional_offset = instr->additional_index() << shift_size;
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result = ToDoubleRegister(instr->result());
     Operand operand = key_is_constant
-        ? Operand(constant_key << shift_size)
+        ? Operand(constant_key * (1 << shift_size))
        : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(result.low(), scratch0(), additional_offset);
+      __ vldr(result.low(), scratch0(), 0);
       __ vcvt_f64_f32(result, result.low());
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vldr(result, scratch0(), additional_offset);
+      __ vldr(result, scratch0(), 0);
     }
   } else {
     Register result = ToRegister(instr->result());
-    if (instr->additional_index() != 0 && !key_is_constant) {
-      __ add(scratch0(), key, Operand(instr->additional_index()));
-    }
     MemOperand mem_operand(key_is_constant
-        ? MemOperand(external_pointer,
-                     (constant_key << shift_size) + additional_offset)
-        : (instr->additional_index() == 0
-           ? MemOperand(external_pointer, key, LSL, shift_size)
-           : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+        : MemOperand(external_pointer, key, LSL, shift_size));
     switch (elements_kind) {
       case EXTERNAL_BYTE_ELEMENTS:
         __ ldrsb(result, mem_operand);
@@ -2870,12 +2860,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
@@ -3743,16 +3730,10 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
-        (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
-        + FixedArray::kHeaderSize;
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ str(value, FieldMemOperand(elements, offset));
  } else {
    __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
-    if (instr->additional_index() != 0) {
-      __ add(scratch,
-             scratch,
-             Operand(instr->additional_index() << kPointerSizeLog2));
-    }
    __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  }
@@ -3794,7 +3775,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
  }
  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  Operand operand = key_is_constant
-      ? Operand((constant_key << shift_size) +
+      ? Operand(constant_key * (1 << shift_size) +
                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
      : Operand(key, LSL, shift_size);
  __ add(scratch, elements, operand);
@@ -3812,7 +3793,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
             vs);
  }
 
-  __ vstr(value, scratch, instr->additional_index() << shift_size);
+  __ vstr(value, scratch, 0);
 }
@@ -3833,33 +3814,25 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(elements_kind);
-  int additional_offset = instr->additional_index() << shift_size;
 
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    CpuFeatures::Scope scope(VFP3);
    DwVfpRegister value(ToDoubleRegister(instr->value()));
-    Operand operand(key_is_constant ? Operand(constant_key << shift_size)
+    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
                                    : Operand(key, LSL, shift_size));
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ vcvt_f32_f64(double_scratch0().low(), value);
-      __ vstr(double_scratch0().low(), scratch0(), additional_offset);
+      __ vstr(double_scratch0().low(), scratch0(), 0);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vstr(value, scratch0(), additional_offset);
+      __ vstr(value, scratch0(), 0);
    }
  } else {
    Register value(ToRegister(instr->value()));
-    if (instr->additional_index() != 0 && !key_is_constant) {
-      __ add(scratch0(), key, Operand(instr->additional_index()));
-    }
    MemOperand mem_operand(key_is_constant
-        ? MemOperand(external_pointer,
-                     ((constant_key + instr->additional_index())
-                         << shift_size))
-        : (instr->additional_index() == 0
-           ? MemOperand(external_pointer, key, LSL, shift_size)
-           : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
+        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
+        : MemOperand(external_pointer, key, LSL, shift_size));
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
@@ -3878,10 +3851,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
@@ -3918,22 +3888,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ cmp(scratch, Operand(from_map));
   __ b(ne, &not_applicable);
   __ mov(new_map_reg, Operand(to_map));
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier. // Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs); scratch, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) && } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
IsFastDoubleElementsKind(to_kind)) { to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg()); Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2)); ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3)); ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg); __ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr); RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) && } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg()); Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2)); ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3)); ASSERT(new_map_reg.is(r3));
@ -4707,9 +4675,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different // Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has // than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND. // already been converted to FAST_ELEMENTS.
if (CanTransitionToMoreGeneralFastElementsKind( if (boilerplate_elements_kind != FAST_ELEMENTS) {
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object()); __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2. // Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@ -4860,11 +4827,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind = ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind(); instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the array literal boilerplate ElementsKind is of a type different // Deopt if the literal boilerplate ElementsKind is of a type different than
// than the expected one. The check isn't necessary if the boilerplate has // the expected one. The check isn't necessary if the boilerplate has already
// already been converted to TERMINAL_FAST_ELEMENTS_KIND. // been converted to FAST_ELEMENTS.
if (CanTransitionToMoreGeneralFastElementsKind( if (boilerplate_elements_kind != FAST_ELEMENTS) {
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2. // Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
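Note: the keyed load/store hunks above all follow one addressing rule: an element lives at elements_base + key * element_size, where element_size is 1 << shift_size for the elements kind. A minimal self-contained C++ sketch of that computation (names are illustrative, not V8's):

    #include <cstdint>

    // Element address for a constant key: base + key * (1 << shift_size).
    // Spelling the scale as a multiply, as the rolled-back code does,
    // keeps the expression well defined for any int key.
    uintptr_t ElementAddress(uintptr_t elements_base, int key, int shift_size) {
      return elements_base +
             static_cast<uintptr_t>(key) * (uintptr_t{1} << shift_size);
    }

For a FixedDoubleArray the same rule applies with FixedDoubleArray::kHeaderSize - kHeapObjectTag folded into the base, which is exactly what the constant-key operand above computes.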

80
deps/v8/src/arm/macro-assembler-arm.cc

@@ -1868,12 +1868,10 @@ void MacroAssembler::CompareRoot(Register obj,
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
   b(hi, fail);
 }
@@ -1881,25 +1879,22 @@ void MacroAssembler::CheckFastElements(Register map,
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Register scratch,
                                              Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
   b(ls, fail);
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
   b(hi, fail);
 }
-void MacroAssembler::CheckFastSmiElements(Register map,
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
                                               Register scratch,
                                               Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
   b(hi, fail);
 }
@@ -2002,17 +1997,22 @@ void MacroAssembler::CompareMap(Register obj,
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   cmp(scratch, Operand(map));
   if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    ElementsKind kind = map->elements_kind();
-    if (IsFastElementsKind(kind)) {
-      bool packed = IsFastPackedElementsKind(kind);
-      Map* current_map = *map;
-      while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
-        kind = GetNextMoreGeneralFastElementsKind(kind, packed);
-        current_map = current_map->LookupElementsTransitionMap(kind, NULL);
-        if (!current_map) break;
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
       b(eq, early_success);
-      cmp(scratch, Operand(Handle<Map>(current_map)));
+      cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
     }
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      b(eq, early_success);
+      cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
     }
   }
 }
@@ -2865,38 +2865,28 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
   // Check that the function's map is the same as the expected cached map.
-  ldr(scratch,
-      MemOperand(scratch,
-                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-  size_t offset = expected_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  cmp(map_in_out, scratch);
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+  cmp(map_in_out, ip);
   b(ne, no_map_match);
   // Use the transitioned cached map.
-  offset = transitioned_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  ldr(map_in_out, FieldMemOperand(scratch, offset));
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
 }
 void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch,
-    Register map_out, bool can_have_holes) {
+    Register function_in, Register scratch, Register map_out) {
   ASSERT(!function_in.is(map_out));
   Label done;
   ldr(map_out, FieldMemOperand(function_in,
                                JSFunction::kPrototypeOrInitialMapOffset));
   if (!FLAG_smi_only_arrays) {
-    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        kind,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  } else if (can_have_holes) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        FAST_HOLEY_SMI_ELEMENTS,
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
                                         map_out,
                                         scratch,
                                         &done);
@@ -3748,7 +3738,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(NULL, address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
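Note: the STATIC_ASSERTs in the three Check* helpers above are load-bearing. Each helper classifies a map with a single unsigned byte compare against a maximum bit-field value, which only works if the ElementsKind values are contiguous and ordered from most to least restrictive. A sketch of the idiom (the 0 and 1 positions are guaranteed by the asserts; the third value is assumed purely for illustration):

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS = 0,  // position guaranteed by STATIC_ASSERT
      FAST_ELEMENTS = 1,           // position guaranteed by STATIC_ASSERT
      FAST_DOUBLE_ELEMENTS = 2     // assumed here, for illustration only
    };

    // One unsigned comparison accepts the whole [0, FAST_ELEMENTS] range,
    // mirroring the ldrb/cmp/b(hi, fail) sequence emitted above.
    bool IsFastKind(ElementsKind kind) {
      return static_cast<unsigned>(kind) <= FAST_ELEMENTS;
    }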

5
deps/v8/src/arm/macro-assembler-arm.h

@@ -512,8 +512,7 @@ class MacroAssembler: public Assembler {
   // Load the initial map for new Arrays from a JSFunction.
   void LoadInitialArrayMap(Register function_in,
                            Register scratch,
-                           Register map_out,
-                           bool can_have_holes);
+                           Register map_out);
   void LoadGlobalFunction(int index, Register function);
@@ -803,7 +802,7 @@ class MacroAssembler: public Assembler {
   // Check if a map for a JSObject indicates that the object has fast smi only
   // elements. Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
+  void CheckFastSmiOnlyElements(Register map,
                                 Register scratch,
                                 Label* fail);
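Note: the two declarations above pair up in practice: a caller either materializes the initial array map for a function or branches on whether a map still holds only smis. A hypothetical call site (register choices and the masm-> spelling are assumptions; only the signatures come from the header):

    // Jump target taken when the map's elements are no longer smi-only.
    Label not_smi_only;
    masm->CheckFastSmiOnlyElements(r3 /* map */, r7 /* scratch */, &not_smi_only);
    masm->LoadInitialArrayMap(r1 /* function */, r2 /* scratch */, r3 /* map out */);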

144
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,31 +43,28 @@ namespace internal {
 #ifndef V8_INTERPRETED_REGEXP
 /*
  * This assembler uses the following register assignment convention
- * - r4 : Temporarily stores the index of capture start after a matching pass
- *        for a global regexp.
  * - r5 : Pointer to current code object (Code*) including heap object tag.
  * - r6 : Current position in input, as negative offset from end of string.
  *        Please notice that this is the byte offset, not the character offset!
  * - r7 : Currently loaded character. Must be loaded using
  *        LoadCurrentCharacter before using any of the dispatch methods.
- * - r8 : Points to tip of backtrack stack
+ * - r8 : points to tip of backtrack stack
  * - r9 : Unused, might be used by C code and expected unchanged.
  * - r10 : End of input (points to byte after last character in input).
  * - r11 : Frame pointer. Used to access arguments, local variables and
  *         RegExp registers.
  * - r12 : IP register, used by assembler. Very volatile.
- * - r13/sp : Points to tip of C stack.
+ * - r13/sp : points to tip of C stack.
  *
  * The remaining registers are free for computations.
  * Each call to a public method should retain this convention.
 *
 * The stack will have the following structure:
- *  - fp[56]  Isolate* isolate   (address of the current isolate)
- *  - fp[52]  direct_call  (if 1, direct call from JavaScript code,
+ *  - fp[52]  Isolate* isolate   (Address of the current isolate)
+ *  - fp[48]  direct_call  (if 1, direct call from JavaScript code,
 *                          if 0, call through the runtime system).
- *  - fp[48]  stack_area_base (high end of the memory area to use as
+ *  - fp[44]  stack_area_base (High end of the memory area to use as
 *                             backtracking stack).
- *  - fp[44]  capture array size (may fit multiple sets of matches)
 *  - fp[40]  int* capture_array (int[num_saved_registers_], for output).
 *  - fp[36]  secondary link/return address used by native call.
 *  --- sp when called ---
@@ -75,17 +72,16 @@ namespace internal {
 *  - fp[28]  old frame pointer (r11).
 *  - fp[0..24]  backup of registers r4..r10.
 *  --- frame pointer ----
- *  - fp[-4]  end of input       (address of end of string).
- *  - fp[-8]  start of input     (address of first character in string).
+ *  - fp[-4]  end of input       (Address of end of string).
+ *  - fp[-8]  start of input     (Address of first character in string).
 *  - fp[-12] start index        (character index of start).
 *  - fp[-16] void* input_string (location of a handle containing the string).
- *  - fp[-20] success counter    (only for global regexps to count matches).
- *  - fp[-24] Offset of location before start of input (effectively character
+ *  - fp[-20] Offset of location before start of input (effectively character
 *            position -1). Used to initialize capture registers to a
 *            non-position.
- *  - fp[-28] At start (if 1, we are starting at the start of the
+ *  - fp[-24] At start (if 1, we are starting at the start of the
 *            string, otherwise 0)
- *  - fp[-32] register 0         (Only positions must be stored in the first
+ *  - fp[-28] register 0         (Only positions must be stored in the first
 *  -         register 1          num_saved_registers_ registers)
 *  - ...
 *  - register num_registers-1
@@ -201,9 +197,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
 void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
   __ cmp(r0, Operand(0, RelocInfo::NONE));
-  BranchOrBacktrack(ne, &not_at_start);
+  BranchOrBacktrack(eq, &not_at_start);
   // If we did, are we still at the start of the input?
   __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -216,9 +212,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
 void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
+  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
   __ cmp(r0, Operand(0, RelocInfo::NONE));
-  BranchOrBacktrack(ne, on_not_at_start);
+  BranchOrBacktrack(eq, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
   __ add(r0, end_of_input_address(), Operand(current_input_offset()));
@@ -659,7 +655,6 @@ void RegExpMacroAssemblerARM::Fail() {
 Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
-  Label return_r0;
   // Finalize code - write the entry point code now we know how many
   // registers we need.
@@ -683,9 +678,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // Set frame pointer in space for it if this is not a direct call
   // from generated code.
   __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
-  __ mov(r0, Operand(0, RelocInfo::NONE));
-  __ push(r0);  // Make room for success counter and initialize it to 0.
   __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
+  __ push(r0);  // Make room for "at start" constant (value is irrelevant).
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
   Label stack_ok;
@@ -704,13 +698,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
   __ mov(r0, Operand(EXCEPTION));
-  __ jmp(&return_r0);
+  __ jmp(&exit_label_);
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(r0);
   __ cmp(r0, Operand(0, RelocInfo::NONE));
   // If returned value is non-zero, we exit with the returned value as result.
-  __ b(ne, &return_r0);
+  __ b(ne, &exit_label_);
   __ bind(&stack_ok);
@@ -731,26 +725,16 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // position registers.
   __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-  // Initialize code pointer register
-  __ mov(code_pointer(), Operand(masm_->CodeObject()));
-  Label load_char_start_regexp, start_regexp;
-  // Load newline if index is at start, previous character otherwise.
-  __ cmp(r1, Operand(0, RelocInfo::NONE));
-  __ b(ne, &load_char_start_regexp);
-  __ mov(current_character(), Operand('\n'), LeaveCC, eq);
-  __ jmp(&start_regexp);
-  // Global regexp restarts matching here.
-  __ bind(&load_char_start_regexp);
-  // Load previous char as initial value of current character register.
-  LoadCurrentCharacterUnchecked(-1, 1);
-  __ bind(&start_regexp);
-  // Initialize on-stack registers.
+  // Determine whether the start index is zero, that is at the start of the
+  // string, and store that value in a local variable.
+  __ cmp(r1, Operand(0));
+  __ mov(r1, Operand(1), LeaveCC, eq);
+  __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+  __ str(r1, MemOperand(frame_pointer(), kAtStart));
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
-    if (num_saved_registers_ > 8) {
     // Address of register 0.
     __ add(r1, frame_pointer(), Operand(kRegisterZero));
     __ mov(r2, Operand(num_saved_registers_));
@@ -759,17 +743,23 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
     __ sub(r2, r2, Operand(1), SetCC);
     __ b(ne, &init_loop);
-    } else {
-      for (int i = 0; i < num_saved_registers_; i++) {
-        __ str(r0, register_location(i));
-      }
-    }
   }
   // Initialize backtrack stack pointer.
   __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+  // Initialize code pointer register
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+  // Load previous char as initial value of current character register.
+  Label at_start;
+  __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
+  __ b(ne, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
+  __ bind(&at_start);
+  __ mov(current_character(), Operand('\n'));
+  __ jmp(&start_label_);
   // Exit code:
   if (success_label_.is_linked()) {
@@ -796,10 +786,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     for (int i = 0; i < num_saved_registers_; i += 2) {
       __ ldr(r2, register_location(i));
       __ ldr(r3, register_location(i + 1));
-      if (global()) {
-        // Keep capture start in r4 for the zero-length check later.
-        __ mov(r4, r2);
-      }
       if (mode_ == UC16) {
         __ add(r2, r1, Operand(r2, ASR, 1));
         __ add(r3, r1, Operand(r3, ASR, 1));
@@ -811,54 +797,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
       __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
     }
   }
-    if (global()) {
-      // Restart matching if the regular expression is flagged as global.
-      __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-      __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
-      __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
-      // Increment success counter.
-      __ add(r0, r0, Operand(1));
-      __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-      // Capture results have been stored, so the number of remaining global
-      // output registers is reduced by the number of stored captures.
-      __ sub(r1, r1, Operand(num_saved_registers_));
-      // Check whether we have enough room for another set of capture results.
-      __ cmp(r1, Operand(num_saved_registers_));
-      __ b(lt, &return_r0);
-      __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
-      // Advance the location for output.
-      __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
-      __ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
-      // Prepare r0 to initialize registers with its value in the next run.
-      __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
-      // Special case for zero-length matches.
-      // r4: capture start index
-      __ cmp(current_input_offset(), r4);
-      // Not a zero-length match, restart.
-      __ b(ne, &load_char_start_regexp);
-      // Offset from the end is zero if we already reached the end.
-      __ cmp(current_input_offset(), Operand(0));
-      __ b(eq, &exit_label_);
-      // Advance current position after a zero-length match.
-      __ add(current_input_offset(),
-             current_input_offset(),
-             Operand((mode_ == UC16) ? 2 : 1));
-      __ b(&load_char_start_regexp);
-    } else {
-      __ mov(r0, Operand(SUCCESS));
-    }
+    __ mov(r0, Operand(SUCCESS));
   }
   // Exit and return r0
   __ bind(&exit_label_);
-  if (global()) {
-    __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-  }
-  __ bind(&return_r0);
   // Skip sp past regexp registers and local variables..
   __ mov(sp, frame_pointer());
   // Restore registers r4..r11 and return (restoring lr to pc).
@@ -880,7 +822,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     __ cmp(r0, Operand(0, RelocInfo::NONE));
     // If returning non-zero, we should end execution with the given
     // result as return value.
-    __ b(ne, &return_r0);
+    __ b(ne, &exit_label_);
     // String might have moved: Reload end of string from frame.
     __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -917,7 +859,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     __ bind(&exit_with_exception);
     // Exit with Result EXCEPTION(-1) to signal thrown exception.
     __ mov(r0, Operand(EXCEPTION));
-    __ jmp(&return_r0);
+    __ jmp(&exit_label_);
   }
   CodeDesc code_desc;
@@ -1072,9 +1014,8 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
 }
-bool RegExpMacroAssemblerARM::Succeed() {
+void RegExpMacroAssemblerARM::Succeed() {
   __ jmp(&success_label_);
-  return global();
 }
@@ -1366,9 +1307,8 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
                                                             int characters) {
   Register offset = current_input_offset();
   if (cp_offset != 0) {
-    // r4 is not being used to store the capture start index at this point.
-    __ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
-    offset = r4;
+    __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = r0;
   }
   // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
   // and the operating system running on the target allow it.
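Note: read as plain logic, the GetCode prologue above computes a one-shot "match started at index 0" flag once, stores it in the kAtStart frame slot, and CheckAtStart/CheckNotAtStart reload it instead of re-deriving it from the start index. A hand-written equivalent of that bookkeeping (stand-in struct; the generated code keeps this in a stack slot, not a struct):

    // One frame slot, written in the prologue, re-read by the dispatch
    // methods. Mirrors: cmp r1, #0; mov r1, #1 (eq) / #0 (ne);
    // str r1, [fp, #kAtStart].
    struct RegExpFrame {
      int at_start;  // 1 if the match began at string index 0, else 0
    };

    void InitializeAtStart(RegExpFrame* frame, int start_index) {
      frame->at_start = (start_index == 0) ? 1 : 0;
    }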

13
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -113,7 +113,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   virtual void ReadStackPointerFromRegister(int reg);
   virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
-  virtual bool Succeed();
+  virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
@@ -137,8 +137,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
   static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
@@ -150,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   static const int kInputString = kStartIndex - kPointerSize;
   // When adding local variables remember to push space for them in
   // the frame in GetCode.
-  static const int kSuccessfulCaptures = kInputString - kPointerSize;
-  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  static const int kInputStartMinusOne = kInputString - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+  static const int kRegisterZero = kAtStart - kPointerSize;
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
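Note: the local-variable constants above chain downward one pointer at a time, and they line up with the fp[-20]/fp[-24]/fp[-28] annotations in the stack-layout comment of regexp-macro-assembler-arm.cc. Spelled out numerically for ARM32 (a checking sketch, not the header itself):

    const int kPointerSize = 4;                               // ARM32
    const int kInputStartMinusOne = -20;                      // fp[-20]
    const int kAtStart = kInputStartMinusOne - kPointerSize;  // fp[-24]
    const int kRegisterZero = kAtStart - kPointerSize;        // fp[-28]

    static_assert(kAtStart == -24 && kRegisterZero == -28,
                  "offsets match the stack-layout comment");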

12
deps/v8/src/arm/simulator-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,16 +49,16 @@ namespace internal {
   (entry(p0, p1, p2, p3, p4))
 typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
-                                  void*, int*, int, Address, int, Isolate*);
+                                  void*, int*, Address, int, Isolate*);
 // Call the generated regexp code directly. The code at the entry address
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
   (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
-      p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+      p0, p1, p2, p3, NULL, p4, p5, p6, p7))
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   reinterpret_cast<TryCatch*>(try_catch_address)
@@ -401,9 +401,9 @@ class Simulator {
   reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
   Simulator::current(Isolate::Current())->Call( \
-      entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   try_catch_address == NULL ? \
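Note: the macro loses an argument because the matcher type itself does: the int that carried the capture-array size disappears along with global-regexp support. The rolled-back signature with parameter names written out (the names are a reading of the frame-layout comment in regexp-macro-assembler-arm.cc; only the types come from the typedef above):

    class String;   // V8 heap types, forward-declared for the sketch
    class Isolate;
    typedef unsigned char byte;
    typedef byte* Address;

    typedef int (*arm_regexp_matcher)(
        String* input,            // subject string
        int start_index,          // character index to start matching at
        const byte* input_start,  // address of the first input character
        const byte* input_end,    // address just past the last character
        void* return_address,     // dummy slot; the macro passes NULL
        int* capture_array,       // int[num_saved_registers_] output
        Address stack_area_base,  // high end of the backtrack stack
        int direct_call,          // 1 = called directly from JS code
        Isolate* isolate);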

50
deps/v8/src/arm/stub-cache-arm.cc

@@ -1581,29 +1581,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
         __ jmp(&fast_object);
         // In case of fast smi-only, convert to fast object, otherwise bail out.
         __ bind(&not_fast_object);
-        __ CheckFastSmiElements(r3, r7, &call_builtin);
+        __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
         // edx: receiver
         // r3: map
-        Label try_holey_map;
-        __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                                FAST_ELEMENTS,
                                                r3,
                                                r7,
-                                               &try_holey_map);
-        __ mov(r2, receiver);
-        ElementsTransitionGenerator::
-            GenerateMapChangeElementsTransition(masm());
-        __ jmp(&fast_object);
-        __ bind(&try_holey_map);
-        __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
-                                               FAST_HOLEY_ELEMENTS,
-                                               r3,
-                                               r7,
                                                &call_builtin);
         __ mov(r2, receiver);
-        ElementsTransitionGenerator::
-            GenerateMapChangeElementsTransition(masm());
+        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
         __ bind(&fast_object);
       } else {
         __ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -3385,11 +3372,8 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3513,11 +3497,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3857,11 +3838,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
      }
      break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3924,11 +3902,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -4067,11 +4042,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -4253,7 +4225,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // Check that the key is a smi or a heap number convertible to a smi.
   GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
-  if (IsFastSmiElementsKind(elements_kind)) {
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(value_reg, &transition_elements_kind);
   }
@@ -4281,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
                 DONT_DO_SMI_CHECK);
   __ bind(&finish_store);
-  if (IsFastSmiElementsKind(elements_kind)) {
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ add(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4291,7 +4263,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
            Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
     __ str(value_reg, MemOperand(scratch));
   } else {
-    ASSERT(IsFastObjectElementsKind(elements_kind));
+    ASSERT(elements_kind == FAST_ELEMENTS);
     __ add(scratch,
            elements_reg,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
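Note: the CompileArrayPushCall hunk is the assembly form of a simple rule: if the receiver's elements are smi-only and the incoming value is not a smi, transition the map to FAST_ELEMENTS once, then store on the fast path. The same decision as straight-line C++ (stand-in types, hand-written; the real transition also rewrites the receiver's map, which is what GenerateSmiOnlyToObject emits code for):

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    struct Value { bool is_smi; };

    ElementsKind KindForPush(ElementsKind kind, const Value& v) {
      if (kind == FAST_SMI_ONLY_ELEMENTS && !v.is_smi) {
        return FAST_ELEMENTS;  // widen once; smi stores never need this
      }
      return kind;
    }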

20
deps/v8/src/bootstrapper.cc

@@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
     global_context()->set_initial_object_prototype(*prototype);
     SetPrototype(object_fun, prototype);
-    object_function_map->set_instance_descriptors(
-        heap->empty_descriptor_array());
+    object_function_map->
+        set_instance_descriptors(heap->empty_descriptor_array());
   }
   // Allocate the empty function as the prototype for function ECMAScript
@@ -516,10 +516,12 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   function_instance_map_writable_prototype_->set_prototype(*empty_function);
   // Allocate the function map first and then patch the prototype later
-  Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
-  empty_function_map->set_prototype(
-      global_context()->object_function()->prototype());
-  empty_function->set_map(*empty_function_map);
+  Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
+      function_without_prototype_map);
+  empty_fm->set_instance_descriptors(
+      function_without_prototype_map->instance_descriptors());
+  empty_fm->set_prototype(global_context()->object_function()->prototype());
+  empty_function->set_map(*empty_fm);
   return empty_function;
 }
@@ -1092,7 +1094,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     // Check the state of the object.
     ASSERT(result->HasFastProperties());
-    ASSERT(result->HasFastObjectElements());
+    ASSERT(result->HasFastElements());
 #endif
   }
@@ -1185,7 +1187,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     // Check the state of the object.
     ASSERT(result->HasFastProperties());
-    ASSERT(result->HasFastObjectElements());
+    ASSERT(result->HasFastElements());
 #endif
   }
@@ -1635,7 +1637,7 @@ bool Genesis::InstallNatives() {
       array_function->initial_map()->CopyDropTransitions();
   Map* new_map;
   if (!maybe_map->To<Map>(&new_map)) return false;
-  new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
+  new_map->set_elements_kind(FAST_ELEMENTS);
   array_function->set_initial_map(new_map);
   // Make "length" magic on instances.

85
deps/v8/src/builtins.cc

@ -200,12 +200,9 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_elements(heap->empty_fixed_array()); array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) { if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context(); Context* global_context = isolate->context()->global_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() && if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
!global_context->js_array_maps()->IsUndefined()) { !global_context->object_js_array_map()->IsUndefined()) {
FixedArray* map_array = array->set_map(Map::cast(global_context->object_js_array_map()));
FixedArray::cast(global_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
} }
} }
} else { } else {
@ -225,13 +222,6 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj; if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
} }
ElementsKind elements_kind = array->GetElementsKind();
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe_array =
array->TransitionElementsKind(elements_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
// We do not use SetContent to skip the unnecessary elements type check. // We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array)); array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj)); array->set_length(Smi::cast(obj));
@ -260,7 +250,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Allocate an appropriately typed elements array. // Allocate an appropriately typed elements array.
MaybeObject* maybe_elms; MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind(); ElementsKind elements_kind = array->GetElementsKind();
if (IsFastDoubleElementsKind(elements_kind)) { if (elements_kind == FAST_DOUBLE_ELEMENTS) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray( maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements); number_of_elements);
} else { } else {
@ -271,15 +261,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Fill in the content // Fill in the content
switch (array->GetElementsKind()) { switch (array->GetElementsKind()) {
case FAST_HOLEY_SMI_ELEMENTS: case FAST_SMI_ONLY_ELEMENTS: {
case FAST_SMI_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms); FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) { for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER); smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
} }
break; break;
} }
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: { case FAST_ELEMENTS: {
AssertNoAllocation no_gc; AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@ -289,7 +277,6 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
} }
break; break;
} }
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: { case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms); FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) { for (int index = 0; index < number_of_elements; index++) {
@ -425,7 +412,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
HeapObject* elms = array->elements(); HeapObject* elms = array->elements();
Map* map = elms->map(); Map* map = elms->map();
if (map == heap->fixed_array_map()) { if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms; if (args == NULL || array->HasFastElements()) return elms;
if (array->HasFastDoubleElements()) { if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array()); ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition = MaybeObject* maybe_transition =
@ -435,7 +422,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
} }
} else if (map == heap->fixed_cow_array_map()) { } else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() || if (args == NULL || array->HasFastElements() ||
maybe_writable_result->IsFailure()) { maybe_writable_result->IsFailure()) {
return maybe_writable_result; return maybe_writable_result;
} }
@ -529,8 +516,8 @@ BUILTIN(ArrayPush) {
} }
FixedArray* new_elms = FixedArray::cast(obj); FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind(); CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len); new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity); FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms; elms = new_elms;
@ -601,7 +588,7 @@ BUILTIN(ArrayShift) {
} }
FixedArray* elms = FixedArray::cast(elms_obj); FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements()); ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value(); if (len == 0) return heap->undefined_value();
@ -643,7 +630,7 @@ BUILTIN(ArrayUnshift) {
} }
FixedArray* elms = FixedArray::cast(elms_obj); FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements()); ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1; int to_add = args.length() - 1;
@ -665,8 +652,8 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj; if (!maybe_obj->ToObject(&obj)) return maybe_obj;
} }
FixedArray* new_elms = FixedArray::cast(obj); FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind(); CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len); new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity); FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms; elms = new_elms;
array->set_elements(elms); array->set_elements(elms);
@ -695,7 +682,7 @@ BUILTIN(ArraySlice) {
int len = -1; int len = -1;
if (receiver->IsJSArray()) { if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
if (!array->HasFastSmiOrObjectElements() || if (!array->HasFastTypeElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) { !IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args); return CallJsBuiltin(isolate, "ArraySlice", args);
} }
@ -711,7 +698,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements = bool is_arguments_object_with_fast_elements =
receiver->IsJSObject() receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map && JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastSmiOrObjectElements(); && JSObject::cast(receiver)->HasFastTypeElements();
if (!is_arguments_object_with_fast_elements) { if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args); return CallJsBuiltin(isolate, "ArraySlice", args);
} }
@ -776,9 +763,9 @@ BUILTIN(ArraySlice) {
JSArray* result_array; JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array; if (!maybe_array->To(&result_array)) return maybe_array;
CopyObjectToObjectElements(elms, elements_kind, k, CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()), FixedArray::cast(result_array->elements()),
elements_kind, 0, result_len); FAST_ELEMENTS, 0, result_len);
return result_array; return result_array;
} }
@ -799,7 +786,7 @@ BUILTIN(ArraySplice) {
} }
FixedArray* elms = FixedArray::cast(elms_obj); FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements()); ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
@ -850,9 +837,9 @@ BUILTIN(ArraySplice) {
{ {
// Fill newly created array. // Fill newly created array.
CopyObjectToObjectElements(elms, elements_kind, actual_start, CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()), FixedArray::cast(result_array->elements()),
elements_kind, 0, actual_delete_count); FAST_ELEMENTS, 0, actual_delete_count);
} }
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@ -901,13 +888,12 @@ BUILTIN(ArraySplice) {
{ {
// Copy the part before actual_start as is. // Copy the part before actual_start as is.
ElementsKind kind = array->GetElementsKind(); CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
CopyObjectToObjectElements(elms, kind, 0, new_elms, FAST_ELEMENTS, 0, actual_start);
new_elms, kind, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start; const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(elms, kind, CopyObjectToObjectElements(elms, FAST_ELEMENTS,
actual_start + actual_delete_count, actual_start + actual_delete_count,
new_elms, kind, new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy); actual_start + item_count, to_copy);
} }
@ -954,12 +940,11 @@ BUILTIN(ArrayConcat) {
// and calculating total length. // and calculating total length.
int n_arguments = args.length(); int n_arguments = args.length();
int result_len = 0; int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind(); ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
for (int i = 0; i < n_arguments; i++) { for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i]; Object* arg = args[i];
if (!arg->IsJSArray() || if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
!JSArray::cast(arg)->HasFastSmiOrObjectElements() || || JSArray::cast(arg)->GetPrototype() != array_proto) {
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args); return CallJsBuiltin(isolate, "ArrayConcat", args);
} }
@@ -976,20 +961,10 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args); return CallJsBuiltin(isolate, "ArrayConcat", args);
} }
if (!JSArray::cast(arg)->HasFastSmiElements()) { if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
if (IsFastSmiElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
elements_kind = FAST_ELEMENTS; elements_kind = FAST_ELEMENTS;
} }
} }
}
if (JSArray::cast(arg)->HasFastHoleyElements()) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
}
// Allocate result. // Allocate result.
JSArray* result_array; JSArray* result_array;
@@ -1007,8 +982,8 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]); JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements()); FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(elms, elements_kind, 0, CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, elements_kind, result_elms, FAST_ELEMENTS,
start_pos, len); start_pos, len);
start_pos += len; start_pos += len;
} }
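Throughout ArrayConcat, the rolled-back code on the right tracks a single result kind that only widens: it starts at FAST_SMI_ONLY_ELEMENTS and switches to FAST_ELEMENTS as soon as one argument holds non-smi values, whereas the newer code on the left additionally folds holeyness into the result. A standalone sketch of the widening rule, using a simplified two-value enum rather than the real ElementsKind:

    #include <vector>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

    // Widen once, never narrow back: any non-smi-only argument decides it.
    ElementsKind ConcatResultKind(const std::vector<bool>& arg_is_smi_only) {
      ElementsKind kind = FAST_SMI_ONLY_ELEMENTS;
      for (bool smi_only : arg_is_smi_only) {
        if (!smi_only) kind = FAST_ELEMENTS;
      }
      return kind;
    }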

32  deps/v8/src/code-stubs.cc

@@ -262,13 +262,10 @@ void JSEntryStub::FinishCode(Handle<Code> code) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) { switch (elements_kind_) {
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS: case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm); KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break; break;
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm); KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break; break;
case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS:
@@ -295,9 +292,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) { switch (elements_kind_) {
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS: case FAST_SMI_ONLY_ELEMENTS: {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_, is_js_array_,
elements_kind_, elements_kind_,
@@ -305,7 +300,6 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
} }
break; break;
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_, is_js_array_,
grow_mode_); grow_mode_);
@@ -436,32 +430,24 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) { void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail; Label fail;
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) { if (!FLAG_trace_elements_transitions) {
if (IsFastSmiOrObjectElementsKind(to_)) { if (to_ == FAST_ELEMENTS) {
if (IsFastSmiOrObjectElementsKind(from_)) { if (from_ == FAST_SMI_ONLY_ELEMENTS) {
ElementsTransitionGenerator:: ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
GenerateMapChangeElementsTransition(masm); } else if (from_ == FAST_DOUBLE_ELEMENTS) {
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(!IsFastSmiElementsKind(to_));
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail); ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else { } else {
UNREACHABLE(); UNREACHABLE();
} }
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_, is_jsarray_,
to_, FAST_ELEMENTS,
grow_mode_); grow_mode_);
} else if (IsFastSmiElementsKind(from_) && } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
IsFastDoubleElementsKind(to_)) { ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_, is_jsarray_,
grow_mode_); grow_mode_);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else { } else {
UNREACHABLE(); UNREACHABLE();
} }
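The rolled-back stub dispatches on exact kind pairs (smi-only→object, double→object, and smi-only→double), while the newer left-hand code generalizes over kind predicates so the holey variants share generators. A standalone restatement of the old dispatch (generator names as in the rolled-back codegen.h below; the real stub emits machine code through the MacroAssembler):

    enum Kind { SMI_ONLY, OBJECT, DOUBLE };

    // Which ElementsTransitionGenerator entry point the old stub picks.
    const char* PickGenerator(Kind from, Kind to) {
      if (to == OBJECT && from == SMI_ONLY) return "GenerateSmiOnlyToObject";
      if (to == OBJECT && from == DOUBLE)   return "GenerateDoubleToObject";
      if (from == SMI_ONLY && to == DOUBLE) return "GenerateSmiOnlyToDouble";
      return "UNREACHABLE";
    }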

1  deps/v8/src/code-stubs.h

@@ -498,7 +498,6 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) { virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_); code->set_compare_state(state_);
code->set_compare_operation(op_);
} }
virtual CodeStub::Major MajorKey() { return CompareIC; } virtual CodeStub::Major MajorKey() { return CompareIC; }

6  deps/v8/src/codegen.h

@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic { class ElementsTransitionGenerator : public AllStatic {
public: public:
static void GenerateMapChangeElementsTransition(MacroAssembler* masm); static void GenerateSmiOnlyToObject(MacroAssembler* masm);
static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail); static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail); static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private: private:

20  deps/v8/src/contexts.h

@@ -106,7 +106,9 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \ V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \ V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \ V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -246,7 +248,9 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX, OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX, INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX, ARRAY_FUNCTION_INDEX,
JS_ARRAY_MAPS_INDEX, SMI_JS_ARRAY_MAP_INDEX,
DOUBLE_JS_ARRAY_MAP_INDEX,
OBJECT_JS_ARRAY_MAP_INDEX,
DATE_FUNCTION_INDEX, DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX, JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX, REGEXP_FUNCTION_INDEX,
@@ -369,6 +373,18 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead(); Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions(); void ClearOptimizedFunctions();
static int GetContextMapIndexFromElementsKind(
ElementsKind elements_kind) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
} else if (elements_kind == FAST_ELEMENTS) {
return Context::OBJECT_JS_ARRAY_MAP_INDEX;
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
return Context::SMI_JS_ARRAY_MAP_INDEX;
}
}
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \ #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \ void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \ ASSERT(IsGlobalContext()); \
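The rolled-back contexts.h keeps one JSArray map per fast elements kind in three separate global-context slots, selected by GetContextMapIndexFromElementsKind; the newer code replaces the trio with a single js_array_maps list. A standalone sketch of the slot selection (illustrative names, not the real Context indices):

    enum Kind { SMI_ONLY, DOUBLE, OBJECT };
    enum Slot { SMI_JS_ARRAY_MAP, DOUBLE_JS_ARRAY_MAP, OBJECT_JS_ARRAY_MAP };

    Slot MapSlotForKind(Kind kind) {
      switch (kind) {
        case DOUBLE: return DOUBLE_JS_ARRAY_MAP;
        case OBJECT: return OBJECT_JS_ARRAY_MAP;
        default:     return SMI_JS_ARRAY_MAP;  // smi-only, as the ASSERT allows
      }
    }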

53  deps/v8/src/d8.cc

@@ -26,8 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Defined when linking against shared lib on Windows. #ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows.
#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
#define V8_SHARED #define V8_SHARED
#endif #endif
@@ -316,8 +315,8 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
} }
const char kArrayBufferMarkerPropName[] = "_is_array_buffer_"; const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
const char kArrayBufferReferencePropName[] = "_array_buffer_ref_"; const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
static const int kExternalArrayAllocationHeaderSize = 2; static const int kExternalArrayAllocationHeaderSize = 2;
@@ -354,11 +353,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
Local<Value> length_value = (args.Length() < 3) Local<Value> length_value = (args.Length() < 3)
? (first_arg_is_array_buffer ? (first_arg_is_array_buffer
? args[0]->ToObject()->Get(String::New("byteLength")) ? args[0]->ToObject()->Get(String::New("length"))
: args[0]) : args[0])
: args[2]; : args[2];
size_t byteLength = convertToUint(length_value, &try_catch); size_t length = convertToUint(length_value, &try_catch);
size_t length = byteLength;
if (try_catch.HasCaught()) return try_catch.Exception(); if (try_catch.HasCaught()) return try_catch.Exception();
void* data = NULL; void* data = NULL;
@@ -370,7 +368,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
data = derived_from->GetIndexedPropertiesExternalArrayData(); data = derived_from->GetIndexedPropertiesExternalArrayData();
size_t array_buffer_length = convertToUint( size_t array_buffer_length = convertToUint(
derived_from->Get(String::New("byteLength")), derived_from->Get(String::New("length")),
&try_catch); &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception(); if (try_catch.HasCaught()) return try_catch.Exception();
@@ -453,20 +451,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
array->SetIndexedPropertiesToExternalArrayData( array->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data) + offset, type, reinterpret_cast<uint8_t*>(data) + offset, type,
static_cast<int>(length)); static_cast<int>(length));
array->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(byteLength)), ReadOnly);
if (!is_array_buffer_construct) {
array->Set(String::New("length"), array->Set(String::New("length"),
Int32::New(static_cast<int32_t>(length)), ReadOnly); Int32::New(static_cast<int32_t>(length)), ReadOnly);
array->Set(String::New("byteOffset"),
Int32::New(static_cast<int32_t>(offset)), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"), array->Set(String::New("BYTES_PER_ELEMENT"),
Int32::New(static_cast<int32_t>(element_size))); Int32::New(static_cast<int32_t>(element_size)));
// We currently support 'buffer' property only if constructed from a buffer.
if (first_arg_is_array_buffer) {
array->Set(String::New("buffer"), args[0], ReadOnly);
}
}
return array; return array;
} }
@@ -834,8 +822,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("print"), FunctionTemplate::New(Print)); global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write)); global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read)); global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("readbuffer"), global_template->Set(String::New("readbinary"),
FunctionTemplate::New(ReadBuffer)); FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readline"), global_template->Set(String::New("readline"),
FunctionTemplate::New(ReadLine)); FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load)); global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -1054,29 +1042,20 @@ static char* ReadChars(const char* name, int* size_out) {
} }
Handle<Value> Shell::ReadBuffer(const Arguments& args) { Handle<Value> Shell::ReadBinary(const Arguments& args) {
String::Utf8Value filename(args[0]); String::Utf8Value filename(args[0]);
int length; int size;
if (*filename == NULL) { if (*filename == NULL) {
return ThrowException(String::New("Error loading file")); return ThrowException(String::New("Error loading file"));
} }
char* data = ReadChars(*filename, &length); char* chars = ReadChars(*filename, &size);
if (data == NULL) { if (chars == NULL) {
return ThrowException(String::New("Error reading file")); return ThrowException(String::New("Error reading file"));
} }
// We skip checking the string for UTF8 characters and use it raw as
Handle<Object> buffer = Object::New(); // backing store for the external string with 8-bit characters.
buffer->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly); BinaryResource* resource = new BinaryResource(chars, size);
return String::NewExternal(resource);
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
buffer->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data), kExternalUnsignedByteArray, length);
buffer->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
} }
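Where the newer shell builds an ArrayBuffer-like object backed by external array data, the rolled-back readbinary hands the raw bytes to V8 as an external 8-bit string. A sketch of the resource wrapper this relies on, assuming the v8::String::ExternalAsciiStringResource interface of this era (ownership handling here is illustrative):

    class BinaryResource : public v8::String::ExternalAsciiStringResource {
     public:
      BinaryResource(char* data, size_t length)
          : data_(data), length_(length) {}
      virtual ~BinaryResource() { delete[] data_; }  // freed when V8 lets go
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      char* data_;
      size_t length_;
    };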

2  deps/v8/src/d8.h

@@ -307,7 +307,7 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args); static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args); static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args); static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args); static Handle<Value> ReadBinary(const Arguments& args);
static Handle<String> ReadFromStdin(); static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) { static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin(); return ReadFromStdin();

32  deps/v8/src/debug-agent.cc

@@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved. // Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -247,7 +247,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
while (!(c == '\n' && prev_c == '\r')) { while (!(c == '\n' && prev_c == '\r')) {
prev_c = c; prev_c = c;
received = conn->Receive(&c, 1); received = conn->Receive(&c, 1);
if (received == 0) { if (received <= 0) {
PrintF("Error %d\n", Socket::LastError()); PrintF("Error %d\n", Socket::LastError());
return SmartArrayPointer<char>(); return SmartArrayPointer<char>();
} }
@@ -323,41 +323,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
const char* embedding_host) { const char* embedding_host) {
static const int kBufferSize = 80; static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer. char buffer[kBufferSize]; // Sending buffer.
bool ok;
int len; int len;
int r;
// Send the header. // Send the header.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Type: connect\r\n"); "Type: connect\r\n");
r = conn->Send(buffer, len); ok = conn->Send(buffer, len);
if (r != len) return false; if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"V8-Version: %s\r\n", v8::V8::GetVersion()); "V8-Version: %s\r\n", v8::V8::GetVersion());
r = conn->Send(buffer, len); ok = conn->Send(buffer, len);
if (r != len) return false; if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Protocol-Version: 1\r\n"); "Protocol-Version: 1\r\n");
r = conn->Send(buffer, len); ok = conn->Send(buffer, len);
if (r != len) return false; if (!ok) return false;
if (embedding_host != NULL) { if (embedding_host != NULL) {
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Embedding-Host: %s\r\n", embedding_host); "Embedding-Host: %s\r\n", embedding_host);
r = conn->Send(buffer, len); ok = conn->Send(buffer, len);
if (r != len) return false; if (!ok) return false;
} }
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: 0\r\n", kContentLength); "%s: 0\r\n", kContentLength);
r = conn->Send(buffer, len); ok = conn->Send(buffer, len);
if (r != len) return false; if (!ok) return false;
// Terminate header with empty line. // Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n"); len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
r = conn->Send(buffer, len); ok = conn->Send(buffer, len);
if (r != len) return false; if (!ok) return false;
// No body for connect message. // No body for connect message.
@@ -454,7 +454,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
int total_received = 0; int total_received = 0;
while (total_received < len) { while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received); int received = conn->Receive(data + total_received, len - total_received);
if (received == 0) { if (received <= 0) {
return total_received; return total_received;
} }
total_received += received; total_received += received;
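The left-hand (newer) SendConnectMessage checks the byte count returned by Socket::Send against the intended length, failing on short writes, while the rolled-back code treats Send as a bool. With a count-returning Send, the robust pattern is the same loop ReceiveAll uses above; an illustrative helper (not part of the patch), assuming that same Socket interface:

    static bool SendAll(const Socket* conn, const char* data, int len) {
      int total_sent = 0;
      while (total_sent < len) {
        int sent = conn->Send(data + total_sent, len - total_sent);
        if (sent <= 0) return false;  // error or closed connection
        total_sent += sent;
      }
      return true;
    }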

42  deps/v8/src/debug.cc

@@ -892,16 +892,6 @@ void Debug::Iterate(ObjectVisitor* v) {
} }
void Debug::PutValuesOnStackAndDie(int start,
Address c_entry_fp,
Address last_fp,
Address larger_fp,
int count,
int end) {
OS::Abort();
}
Object* Debug::Break(Arguments args) { Object* Debug::Break(Arguments args) {
Heap* heap = isolate_->heap(); Heap* heap = isolate_->heap();
HandleScope scope(isolate_); HandleScope scope(isolate_);
@@ -994,34 +984,11 @@ Object* Debug::Break(Arguments args) {
// Count frames until target frame // Count frames until target frame
int count = 0; int count = 0;
JavaScriptFrameIterator it(isolate_); JavaScriptFrameIterator it(isolate_);
while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) { while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
count++; count++;
it.Advance(); it.Advance();
} }
// Catch the cases that would lead to crashes and capture
// - C entry FP at which to start stack crawl.
// - FP of the frame at which we plan to stop stepping out (last FP).
// - current FP that's larger than last FP.
// - Counter for the number of steps to step out.
if (it.done()) {
// We crawled the entire stack, never reaching last_fp_.
PutValuesOnStackAndDie(0xBEEEEEEE,
frame->fp(),
thread_local_.last_fp_,
NULL,
count,
0xFEEEEEEE);
} else if (it.frame()->fp() != thread_local_.last_fp_) {
// We crawled over last_fp_, without getting a match.
PutValuesOnStackAndDie(0xBEEEEEEE,
frame->fp(),
thread_local_.last_fp_,
it.frame()->fp(),
count,
0xFEEEEEEE);
}
// If we found original frame // If we found original frame
if (it.frame()->fp() == thread_local_.last_fp_) { if (it.frame()->fp() == thread_local_.last_fp_) {
if (step_count > 1) { if (step_count > 1) {
@@ -2260,13 +2227,6 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
} }
const int Debug::FramePaddingLayout::kInitialSize = 1;
// Any even value bigger than kInitialSize as needed for stack scanning.
const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) { bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == debug_context()->global(); return IsLoaded() && global == debug_context()->global();
} }
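The removed diagnostics hinge on the frame walk's comparison: stepping with fp() < last_fp_ lets the walker notice when it lands past the recorded frame pointer (stacks grow downward, so older frames have higher addresses), and PutValuesOnStackAndDie then aborts with the relevant values captured; the rollback restores the plain != walk. A standalone sketch of the distinction, over a hypothetical list of frame pointers ascending toward older frames:

    #include <cstdint>
    #include <vector>

    // Index of target_fp, or -1 if the walk ran out or stepped over it.
    int FindTargetFrame(const std::vector<uintptr_t>& fps,
                        uintptr_t target_fp) {
      size_t i = 0;
      while (i < fps.size() && fps[i] < target_fp) ++i;  // the '<' walk
      if (i == fps.size() || fps[i] != target_fp) return -1;
      return static_cast<int>(i);
    }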

50  deps/v8/src/debug.h

@@ -232,12 +232,6 @@ class Debug {
void PreemptionWhileInDebugger(); void PreemptionWhileInDebugger();
void Iterate(ObjectVisitor* v); void Iterate(ObjectVisitor* v);
NO_INLINE(void PutValuesOnStackAndDie(int start,
Address c_entry_fp,
Address last_fp,
Address larger_fp,
int count,
int end));
Object* Break(Arguments args); Object* Break(Arguments args);
void SetBreakPoint(Handle<SharedFunctionInfo> shared, void SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object, Handle<Object> break_point_object,
@@ -463,50 +457,6 @@ class Debug {
// Architecture-specific constant. // Architecture-specific constant.
static const bool kFrameDropperSupported; static const bool kFrameDropperSupported;
/**
* Defines layout of a stack frame that supports padding. This is a regular
* internal frame that has a flexible stack structure. LiveEdit can shift
* its lower part up the stack, taking up the 'padding' space when additional
* stack memory is required.
* Such frame is expected immediately above the topmost JavaScript frame.
*
* Stack Layout:
* --- Top
* LiveEdit routine frames
* ---
* C frames of debug handler
* ---
* ...
* ---
* An internal frame that has n padding words:
* - any number of words as needed by code -- upper part of frame
* - padding size: a Smi storing n -- current size of padding
* - padding: n words filled with kPaddingValue in form of Smi
* - 3 context/type words of a regular InternalFrame
* - fp
* ---
* Topmost JavaScript frame
* ---
* ...
* --- Bottom
*/
class FramePaddingLayout : public AllStatic {
public:
// Architecture-specific constant.
static const bool kIsSupported;
// A size of frame base including fp. Padding words starts right above
// the base.
static const int kFrameBaseSize = 4;
// A number of words that should be reserved on stack for the LiveEdit use.
// Normally equals 1. Stored on stack in form of Smi.
static const int kInitialSize;
// A value that padding words are filled with (in form of Smi). Going
// bottom-top, the first word not having this value is a counter word.
static const int kPaddingValue;
};
private: private:
explicit Debug(Isolate* isolate); explicit Debug(Isolate* isolate);
~Debug(); ~Debug();
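The removed FramePaddingLayout comment describes the padded internal frame LiveEdit needs: above the four base words (fp plus three context/type words) sit n padding Smis and then a counter Smi holding n. The words an actual padded frame consumes, per that layout, come to base + padding + one counter word; a trivial standalone restatement:

    // Words a padded internal frame occupies, excluding code-specific slots.
    int PaddedFrameSizeInWords(int padding_words) {
      const int kFrameBaseSize = 4;  // fp + 3 context/type words, per above
      return kFrameBaseSize + padding_words + 1;  // +1 for the size word
    }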

134  deps/v8/src/elements-kind.cc

@@ -1,134 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "elements-kind.h"
#include "api.h"
#include "elements.h"
#include "objects.h"
namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}
ElementsKind GetInitialFastElementsKind() {
if (FLAG_packed_arrays) {
return FAST_SMI_ELEMENTS;
} else {
return FAST_HOLEY_SMI_ELEMENTS;
}
}
struct InitializeFastElementsKindSequence {
static void Construct(
ElementsKind** fast_elements_kind_sequence_ptr) {
ElementsKind* fast_elements_kind_sequence =
new ElementsKind[kFastElementsKindCount];
*fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
}
};
static LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
ASSERT(sequence_number >= 0 &&
sequence_number < kFastElementsKindCount);
return fast_elements_kind_sequence.Get()[sequence_number];
}
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
return i;
}
}
UNREACHABLE();
return 0;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
int index =
GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
elements_kind = GetFastElementsKindFromSequenceIndex(index);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
}
UNREACHABLE();
return TERMINAL_FAST_ELEMENTS_KIND;
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS;
case FAST_HOLEY_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS;
case FAST_DOUBLE_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS &&
to_kind != FAST_DOUBLE_ELEMENTS;
case FAST_HOLEY_DOUBLE_ELEMENTS:
return to_kind == FAST_ELEMENTS ||
to_kind == FAST_HOLEY_ELEMENTS;
case FAST_ELEMENTS:
return to_kind == FAST_HOLEY_ELEMENTS;
case FAST_HOLEY_ELEMENTS:
return false;
default:
return false;
}
}
} } // namespace v8::internal
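The deleted file pins down the order in which fast kinds generalize (smi → double → object, packed before its holey counterpart), and GetNextMoreGeneralFastElementsKind just walks that sequence, skipping holey entries when only packed results are allowed. A standalone mirror of the walk (same order; as with the ASSERT above, it must not be called on the terminal kind):

    enum Kind { SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJ, HOLEY_OBJ };

    static const Kind kSequence[] = {SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE,
                                     OBJ, HOLEY_OBJ};

    static bool IsHoley(Kind k) {
      return k == HOLEY_SMI || k == HOLEY_DOUBLE || k == HOLEY_OBJ;
    }

    // E.g. NextMoreGeneral(SMI, true) == DOUBLE: holey steps are skipped.
    Kind NextMoreGeneral(Kind kind, bool allow_only_packed) {
      int i = 0;
      while (kSequence[i] != kind) ++i;
      do {
        kind = kSequence[++i];
      } while (allow_only_packed && IsHoley(kind));
      return kind;
    }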

210  deps/v8/src/elements-kind.h

@@ -1,210 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
#include "v8checks.h"
namespace v8 {
namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
// efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
// together at once.
FAST_ELEMENTS,
FAST_HOLEY_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
FAST_DOUBLE_ELEMENTS,
FAST_HOLEY_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
EXTERNAL_PIXEL_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastSmiElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS;
}
inline bool IsFastObjectElementsKind(ElementsKind kind) {
return kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastHoleyElementsKind(ElementsKind kind) {
return kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsHoleyElementsKind(ElementsKind kind) {
return IsFastHoleyElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS;
}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_ELEMENTS;
}
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
return FAST_SMI_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
return FAST_DOUBLE_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_ELEMENTS) {
return FAST_ELEMENTS;
}
return holey_kind;
}
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
if (packed_kind == FAST_SMI_ELEMENTS) {
return FAST_HOLEY_SMI_ELEMENTS;
}
if (packed_kind == FAST_DOUBLE_ELEMENTS) {
return FAST_HOLEY_DOUBLE_ELEMENTS;
}
if (packed_kind == FAST_ELEMENTS) {
return FAST_HOLEY_ELEMENTS;
}
return packed_kind;
}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
ASSERT(IsFastSmiElementsKind(from_kind));
return (from_kind == FAST_SMI_ELEMENTS)
? FAST_ELEMENTS
: FAST_HOLEY_ELEMENTS;
}
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
(IsFastSmiElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind));
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed);
inline bool CanTransitionToMoreGeneralFastElementsKind(
ElementsKind elements_kind,
bool allow_only_packed) {
return IsFastElementsKind(elements_kind) &&
(elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
(!allow_only_packed || elements_kind != FAST_ELEMENTS));
}
} } // namespace v8::internal
#endif // V8_ELEMENTS_KIND_H_
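A quick usage sketch exercising the inline predicates declared above (illustrative; it assumes the header's enum and functions are in scope):

    #include <cassert>

    int main() {
      assert(GetHoleyElementsKind(FAST_SMI_ELEMENTS) ==
             FAST_HOLEY_SMI_ELEMENTS);
      assert(IsSimpleMapChangeTransition(FAST_SMI_ELEMENTS,
                                         FAST_HOLEY_SMI_ELEMENTS));
      // FAST_HOLEY_ELEMENTS is terminal: no transition out of it counts.
      assert(!IsMoreGeneralElementsKindTransition(FAST_HOLEY_ELEMENTS,
                                                  FAST_ELEMENTS));
      return 0;
    }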

400  deps/v8/src/elements.cc

@@ -39,14 +39,8 @@
// Inheritance hierarchy: // Inheritance hierarchy:
// - ElementsAccessorBase (abstract) // - ElementsAccessorBase (abstract)
// - FastElementsAccessor (abstract) // - FastElementsAccessor (abstract)
// - FastSmiOrObjectElementsAccessor // - FastObjectElementsAccessor
// - FastPackedSmiElementsAccessor
// - FastHoleySmiElementsAccessor
// - FastPackedObjectElementsAccessor
// - FastHoleyObjectElementsAccessor
// - FastDoubleElementsAccessor // - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
// - ExternalElementsAccessor (abstract) // - ExternalElementsAccessor (abstract)
// - ExternalByteElementsAccessor // - ExternalByteElementsAccessor
// - ExternalUnsignedByteElementsAccessor // - ExternalUnsignedByteElementsAccessor
@@ -71,15 +65,9 @@ namespace internal {
// identical. Note that the order must match that of the ElementsKind enum for // identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work. // the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \ #define ELEMENTS_LIST(V) \
V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \ V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \ V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
FixedArray) \ V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \ SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \ V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
@@ -151,6 +139,8 @@ void CopyObjectToObjectElements(FixedArray* from,
uint32_t to_start, uint32_t to_start,
int raw_copy_size) { int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map()); ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size; int copy_size = raw_copy_size;
if (raw_copy_size < 0) { if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -158,7 +148,7 @@ void CopyObjectToObjectElements(FixedArray* from,
copy_size = Min(from->length() - from_start, copy_size = Min(from->length() - from_start,
to->length() - to_start); to->length() - to_start);
#ifdef DEBUG #ifdef DEBUG
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// marked with the hole. // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) { for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -170,15 +160,12 @@ void CopyObjectToObjectElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length()); (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return; if (copy_size == 0) return;
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize; Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize; Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start, CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start, reinterpret_cast<Object**>(from_address) + from_start,
copy_size); copy_size);
if (IsFastObjectElementsKind(from_kind) && if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap(); Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) { if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(), heap->RecordWrites(to->address(),
@@ -203,7 +190,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start; copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG #ifdef DEBUG
// Fast object arrays cannot be uninitialized. Ensure they are already // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// marked with the hole. // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) { for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -213,7 +200,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
#endif #endif
} }
ASSERT(to != from); ASSERT(to != from);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == 0) return; if (copy_size == 0) return;
uint32_t to_length = to->length(); uint32_t to_length = to->length();
if (to_start + copy_size > to_length) { if (to_start + copy_size > to_length) {
@@ -229,7 +216,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
to->set_the_hole(i + to_start); to->set_the_hole(i + to_start);
} }
} }
if (IsFastObjectElementsKind(to_kind)) { if (to_kind == FAST_ELEMENTS) {
if (!heap->InNewSpace(to)) { if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(), heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start), to->OffsetOfElementAt(to_start),
@@ -247,7 +234,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
ElementsKind to_kind, ElementsKind to_kind,
uint32_t to_start, uint32_t to_start,
int raw_copy_size) { int raw_copy_size) {
ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size; int copy_size = raw_copy_size;
if (raw_copy_size < 0) { if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -255,7 +242,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
copy_size = Min(from->length() - from_start, copy_size = Min(from->length() - from_start,
to->length() - to_start); to->length() - to_start);
#ifdef DEBUG #ifdef DEBUG
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// marked with the hole. // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) { for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -268,14 +255,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
(copy_size + static_cast<int>(from_start)) <= from->length()); (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from; if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) { for (int i = 0; i < copy_size; ++i) {
if (IsFastSmiElementsKind(to_kind)) { if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED(); UNIMPLEMENTED();
return Failure::Exception(); return Failure::Exception();
} else { } else {
MaybeObject* maybe_value = from->get(i + from_start); MaybeObject* maybe_value = from->get(i + from_start);
Object* value; Object* value;
ASSERT(IsFastObjectElementsKind(to_kind)); ASSERT(to_kind == FAST_ELEMENTS);
// Because Double -> Object elements transitions allocate HeapObjects // Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
// iteratively, the allocate must succeed within a single GC cycle, // iteratively, the allocate must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure // otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they // that no GC is triggered, allocate HeapNumbers from old space if they
@@ -417,38 +404,6 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual ElementsKind kind() const { return ElementsTraits::Kind; } virtual ElementsKind kind() const { return ElementsTraits::Kind; }
static void ValidateContents(JSObject* holder, int length) {
}
static void ValidateImpl(JSObject* holder) {
FixedArrayBase* fixed_array_base = holder->elements();
// When objects are first allocated, its elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
if (map == heap->raw_unchecked_one_pointer_filler_map() ||
map == heap->raw_unchecked_two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
if (length_obj->IsSmi()) {
length = Smi::cast(length_obj)->value();
}
} else {
length = fixed_array_base->length();
}
ElementsAccessorSubclass::ValidateContents(holder, length);
}
virtual void Validate(JSObject* holder) {
ElementsAccessorSubclass::ValidateImpl(holder);
}
static bool HasElementImpl(Object* receiver, static bool HasElementImpl(Object* receiver,
JSObject* holder, JSObject* holder,
uint32_t key, uint32_t key,
@@ -469,7 +424,7 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store)); receiver, holder, key, BackingStore::cast(backing_store));
} }
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver, virtual MaybeObject* Get(Object* receiver,
JSObject* holder, JSObject* holder,
uint32_t key, uint32_t key,
FixedArrayBase* backing_store) { FixedArrayBase* backing_store) {
@@ -480,7 +435,7 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store)); receiver, holder, key, BackingStore::cast(backing_store));
} }
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, static MaybeObject* GetImpl(Object* receiver,
JSObject* obj, JSObject* obj,
uint32_t key, uint32_t key,
BackingStore* backing_store) { BackingStore* backing_store) {
@@ -489,19 +444,17 @@ class ElementsAccessorBase : public ElementsAccessor {
: backing_store->GetHeap()->the_hole_value(); : backing_store->GetHeap()->the_hole_value();
} }
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array, virtual MaybeObject* SetLength(JSArray* array,
Object* length) { Object* length) {
return ElementsAccessorSubclass::SetLengthImpl( return ElementsAccessorSubclass::SetLengthImpl(
array, length, BackingStore::cast(array->elements())); array, length, BackingStore::cast(array->elements()));
} }
MUST_USE_RESULT static MaybeObject* SetLengthImpl( static MaybeObject* SetLengthImpl(JSObject* obj,
JSObject* obj,
Object* length, Object* length,
BackingStore* backing_store); BackingStore* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength( virtual MaybeObject* SetCapacityAndLength(JSArray* array,
JSArray* array,
int capacity, int capacity,
int length) { int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength( return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
@@ -510,19 +463,18 @@ class ElementsAccessorBase : public ElementsAccessor {
length); length);
} }
MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength( static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
JSObject* obj,
int capacity, int capacity,
int length) { int length) {
UNIMPLEMENTED(); UNIMPLEMENTED();
return obj; return obj;
} }
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj, virtual MaybeObject* Delete(JSObject* obj,
uint32_t key, uint32_t key,
JSReceiver::DeleteMode mode) = 0; JSReceiver::DeleteMode mode) = 0;
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start, uint32_t from_start,
FixedArrayBase* to, FixedArrayBase* to,
ElementsKind to_kind, ElementsKind to_kind,
@@ -532,7 +484,7 @@ class ElementsAccessorBase : public ElementsAccessor {
return NULL; return NULL;
} }
MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder, virtual MaybeObject* CopyElements(JSObject* from_holder,
uint32_t from_start, uint32_t from_start,
FixedArrayBase* to, FixedArrayBase* to,
ElementsKind to_kind, ElementsKind to_kind,
@@ -549,8 +501,7 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, to, to_kind, to_start, copy_size); from, from_start, to, to_kind, to_start, copy_size);
} }
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray( virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
Object* receiver,
JSObject* holder, JSObject* holder,
FixedArray* to, FixedArray* to,
FixedArrayBase* from) { FixedArrayBase* from) {
@@ -669,7 +620,6 @@ class FastElementsAccessor
KindTraits>(name) {} KindTraits>(name) {}
protected: protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>; friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
friend class NonStrictArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore; typedef typename KindTraits::BackingStore BackingStore;
@@ -680,21 +630,10 @@ class FastElementsAccessor
Object* length_object, Object* length_object,
uint32_t length) { uint32_t length) {
uint32_t old_capacity = backing_store->length(); uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
bool same_size = old_length->IsSmi() &&
static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
ElementsKind kind = array->GetElementsKind();
if (!same_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
// Check whether the backing store should be shrunk. // Check whether the backing store should be shrunk.
if (length <= old_capacity) { if (length <= old_capacity) {
if (array->HasFastSmiOrObjectElements()) { if (array->HasFastTypeElements()) {
MaybeObject* maybe_obj = array->EnsureWritableFastElements(); MaybeObject* maybe_obj = array->EnsureWritableFastElements();
if (!maybe_obj->To(&backing_store)) return maybe_obj; if (!maybe_obj->To(&backing_store)) return maybe_obj;
} }
@@ -726,40 +665,39 @@ class FastElementsAccessor
MaybeObject* result = FastElementsAccessorSubclass:: MaybeObject* result = FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length); SetFastElementsCapacityAndLength(array, new_capacity, length);
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
array->ValidateElements();
return length_object; return length_object;
} }
// Request conversion to slow elements. // Request conversion to slow elements.
return array->GetHeap()->undefined_value(); return array->GetHeap()->undefined_value();
} }
};
class FastObjectElementsAccessor
: public FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize> {
public:
explicit FastObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj, static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key, uint32_t key) {
JSReceiver::DeleteMode mode) { ASSERT(obj->HasFastElements() ||
ASSERT(obj->HasFastSmiOrObjectElements() || obj->HasFastSmiOnlyElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements()); obj->HasFastArgumentsElements());
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap(); Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) { if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
backing_store = backing_store = FixedArray::cast(backing_store->get(1));
KindTraits::BackingStore::cast(
FixedArray::cast(backing_store)->get(1));
} else { } else {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
MaybeObject* transitioned =
obj->TransitionElementsKind(GetHoleyElementsKind(kind));
if (transitioned->IsFailure()) return transitioned;
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
Object* writable; Object* writable;
MaybeObject* maybe = obj->EnsureWritableFastElements(); MaybeObject* maybe = obj->EnsureWritableFastElements();
if (!maybe->ToObject(&writable)) return maybe; if (!maybe->ToObject(&writable)) return maybe;
backing_store = KindTraits::BackingStore::cast(writable); backing_store = FixedArray::cast(writable);
}
} }
uint32_t length = static_cast<uint32_t>( uint32_t length = static_cast<uint32_t>(
obj->IsJSArray() obj->IsJSArray()
@@ -771,14 +709,15 @@ class FastElementsAccessor
// has too few used values, normalize it. // has too few used values, normalize it.
// To avoid doing the check on every delete we require at least // To avoid doing the check on every delete we require at least
// one adjacent hole to the value being deleted. // one adjacent hole to the value being deleted.
Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64; const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck && if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) && !heap->InNewSpace(backing_store) &&
((key > 0 && backing_store->is_the_hole(key - 1)) || ((key > 0 && backing_store->get(key - 1) == hole) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) { (key + 1 < length && backing_store->get(key + 1) == hole))) {
int num_used = 0; int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) { for (int i = 0; i < backing_store->length(); ++i) {
if (!backing_store->is_the_hole(i)) ++num_used; if (backing_store->get(i) != hole) ++num_used;
// Bail out early if more than 1/4 is used. // Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break; if (4 * num_used > backing_store->length()) break;
} }
@@ -791,74 +730,26 @@ class FastElementsAccessor
return heap->true_value(); return heap->true_value();
} }
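Deletion guards dictionary normalization with a cheap sparseness test: only for backing stores of at least 64 elements outside new space, and only when the deleted slot already neighbors a hole; the scan then bails out as soon as more than a quarter of the slots prove used. A standalone restatement of the scan (the normalization call itself falls outside this hunk):

    // True when few enough slots are used that a dictionary would pay off.
    bool ShouldNormalize(const bool* used, int length) {
      const int kMinLengthForSparsenessCheck = 64;
      if (length < kMinLengthForSparsenessCheck) return false;
      int num_used = 0;
      for (int i = 0; i < length; ++i) {
        if (used[i]) ++num_used;
        if (4 * num_used > length) return false;  // dense: bail out early
      }
      return true;
    }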
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
static bool HasElementImpl(
Object* receiver,
JSObject* holder,
uint32_t key,
typename KindTraits::BackingStore* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
return !backing_store->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
#if DEBUG
FixedArrayBase* elements = holder->elements();
Heap* heap = elements->GetHeap();
Map* map = elements->map();
ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
(map == heap->fixed_array_map() ||
map == heap->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(KindTraits::Kind) ==
((map == heap->fixed_array_map() && length == 0) ||
map == heap->fixed_double_array_map())));
for (int i = 0; i < length; i++) {
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
backing_store->is_the_hole(i)));
}
#endif
}
};
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize> {
public:
explicit FastSmiOrObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize>(name) {}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from, static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start, uint32_t from_start,
FixedArrayBase* to, FixedArrayBase* to,
ElementsKind to_kind, ElementsKind to_kind,
uint32_t to_start, uint32_t to_start,
int copy_size) { int copy_size) {
if (IsFastSmiOrObjectElementsKind(to_kind)) { switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
CopyObjectToObjectElements( CopyObjectToObjectElements(
FixedArray::cast(from), KindTraits::Kind, from_start, FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size); FixedArray::cast(to), to_kind, to_start, copy_size);
} else if (IsFastDoubleElementsKind(to_kind)) { return from;
}
case FAST_DOUBLE_ELEMENTS:
CopyObjectToDoubleElements( CopyObjectToDoubleElements(
FixedArray::cast(from), from_start, FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size); FixedDoubleArray::cast(to), to_start, copy_size);
} else { return from;
default:
UNREACHABLE(); UNREACHABLE();
} }
return to->GetHeap()->undefined_value(); return to->GetHeap()->undefined_value();
@@ -868,85 +759,51 @@ class FastSmiOrObjectElementsAccessor
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity, uint32_t capacity,
uint32_t length) { uint32_t length) {
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode = JSObject::SetFastElementsCapacityMode set_capacity_mode =
obj->HasFastSmiElements() obj->HasFastSmiOnlyElements()
? JSObject::kAllowSmiElements ? JSObject::kAllowSmiOnlyElements
: JSObject::kDontAllowSmiElements; : JSObject::kDontAllowSmiOnlyElements;
return obj->SetFastElementsCapacityAndLength(capacity, return obj->SetFastElementsCapacityAndLength(capacity,
length, length,
set_capacity_mode); set_capacity_mode);
} }
};
class FastPackedSmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> > {
public:
explicit FastPackedSmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
};
class FastHoleySmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
public:
explicit FastHoleySmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
};
class FastPackedObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> > {
public:
explicit FastPackedObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> >(name) {}
};
protected:
friend class FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>;
class FastHoleyObjectElementsAccessor virtual MaybeObject* Delete(JSObject* obj,
: public FastSmiOrObjectElementsAccessor< uint32_t key,
FastHoleyObjectElementsAccessor, JSReceiver::DeleteMode mode) {
ElementsKindTraits<FAST_HOLEY_ELEMENTS> > { return DeleteCommon(obj, key);
public: }
explicit FastHoleyObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
}; };
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastDoubleElementsAccessor class FastDoubleElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass, : public FastElementsAccessor<FastDoubleElementsAccessor,
KindTraits, ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize> { kDoubleSize> {
public: public:
explicit FastDoubleElementsAccessor(const char* name) explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass, : FastElementsAccessor<FastDoubleElementsAccessor,
KindTraits, ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>(name) {} kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity, uint32_t capacity,
uint32_t length) { uint32_t length) {
return obj->SetFastDoubleElementsCapacityAndLength(capacity, return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
length);
} }
protected: protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
friend class FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>;
static MaybeObject* CopyElementsImpl(FixedArrayBase* from, static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start, uint32_t from_start,
FixedArrayBase* to, FixedArrayBase* to,
@ -954,15 +811,12 @@ class FastDoubleElementsAccessor
uint32_t to_start, uint32_t to_start,
int copy_size) { int copy_size) {
switch (to_kind) { switch (to_kind) {
case FAST_SMI_ELEMENTS: case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements( return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to), FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size); to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start, CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to), FixedDoubleArray::cast(to),
to_start, copy_size); to_start, copy_size);
@ -972,35 +826,26 @@ class FastDoubleElementsAccessor
} }
return to->GetHeap()->undefined_value(); return to->GetHeap()->undefined_value();
} }
};
class FastPackedDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
explicit FastPackedDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
};
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
int length = obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: FixedDoubleArray::cast(obj->elements())->length();
if (key < static_cast<uint32_t>(length)) {
FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
}
return obj->GetHeap()->true_value();
}
class FastHoleyDoubleElementsAccessor static bool HasElementImpl(Object* receiver,
: public FastDoubleElementsAccessor< JSObject* holder,
FastHoleyDoubleElementsAccessor, uint32_t key,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > { FixedDoubleArray* backing_store) {
public: return key < static_cast<uint32_t>(backing_store->length()) &&
friend class ElementsAccessorBase< !backing_store->is_the_hole(key);
FastHoleyDoubleElementsAccessor, }
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
explicit FastHoleyDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
}; };
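Worth noting about the restored double-element paths above: Delete() does not compact the backing store, it just writes the hole marker into the slot, and HasElementImpl() treats an in-range hole as an absent element. A minimal standalone sketch of that scheme (not V8 code; the hole bit pattern here is invented, though V8 likewise reserves a NaN payload for holes in double arrays):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // One reserved NaN bit pattern marks deleted slots, so Delete() can
    // punch a hole without reallocating, and HasElement() just tests the
    // slot. kHoleBits is an arbitrary quiet-NaN payload for this sketch.
    static const uint64_t kHoleBits = 0x7FF7DEADDEADBEEFULL;

    class DoubleStore {
     public:
      explicit DoubleStore(size_t n) : slots_(n, kHoleBits) {}

      void Set(size_t i, double v) { std::memcpy(&slots_[i], &v, sizeof v); }
      void SetTheHole(size_t i) { slots_[i] = kHoleBits; }
      bool IsTheHole(size_t i) const { return slots_[i] == kHoleBits; }

      // Mirrors HasElementImpl above: in range and not a hole.
      bool HasElement(size_t i) const {
        return i < slots_.size() && !IsTheHole(i);
      }

     private:
      std::vector<uint64_t> slots_;  // raw bits, so the hole NaN compares exactly
    };

    int main() {
      DoubleStore store(4);
      store.Set(1, 3.14);
      assert(store.HasElement(1));
      store.SetTheHole(1);           // what Delete() does for key < length
      assert(!store.HasElement(1));  // a hole reads as "no element"
      assert(!store.HasElement(9));  // out of range
    }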
@@ -1021,7 +866,7 @@ class ExternalElementsAccessor
   friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
                                     ElementsKindTraits<Kind> >;
-  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+  static MaybeObject* GetImpl(Object* receiver,
                                               JSObject* obj,
                                               uint32_t key,
                                               BackingStore* backing_store) {
@@ -1031,8 +876,7 @@ class ExternalElementsAccessor
         : backing_store->GetHeap()->undefined_value();
   }
-  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
-      JSObject* obj,
+  static MaybeObject* SetLengthImpl(JSObject* obj,
                                     Object* length,
                                     BackingStore* backing_store) {
     // External arrays do not support changing their length.
@@ -1040,7 +884,7 @@ class ExternalElementsAccessor
     return obj;
   }
-  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+  virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
     // External arrays always ignore deletes.
@@ -1158,8 +1002,7 @@ class DictionaryElementsAccessor
   // Adjusts the length of the dictionary backing store and returns the new
   // length according to ES5 section 15.4.5.2 behavior.
-  MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
-      SeededNumberDictionary* dict,
+  static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
                                                 JSArray* array,
                                                 Object* length_object,
                                                 uint32_t length) {
@@ -1214,8 +1057,7 @@ class DictionaryElementsAccessor
     return length_object;
   }
-  MUST_USE_RESULT static MaybeObject* DeleteCommon(
-      JSObject* obj,
+  static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key,
                                    JSReceiver::DeleteMode mode) {
     Isolate* isolate = obj->GetIsolate();
@@ -1260,23 +1102,20 @@ class DictionaryElementsAccessor
     return heap->true_value();
   }
-  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                        uint32_t from_start,
                                        FixedArrayBase* to,
                                        ElementsKind to_kind,
                                        uint32_t to_start,
                                        int copy_size) {
     switch (to_kind) {
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
         CopyDictionaryToObjectElements(
             SeededNumberDictionary::cast(from), from_start,
             FixedArray::cast(to), to_kind, to_start, copy_size);
         return from;
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
         CopyDictionaryToDoubleElements(
             SeededNumberDictionary::cast(from), from_start,
             FixedDoubleArray::cast(to), to_start, copy_size);
@@ -1292,14 +1131,13 @@ class DictionaryElementsAccessor
   friend class ElementsAccessorBase<DictionaryElementsAccessor,
                                     ElementsKindTraits<DICTIONARY_ELEMENTS> >;
-  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+  virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
     return DeleteCommon(obj, key, mode);
   }
-  MUST_USE_RESULT static MaybeObject* GetImpl(
-      Object* receiver,
+  static MaybeObject* GetImpl(Object* receiver,
                               JSObject* obj,
                               uint32_t key,
                               SeededNumberDictionary* backing_store) {
@@ -1348,7 +1186,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
     NonStrictArgumentsElementsAccessor,
     ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
-  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+  static MaybeObject* GetImpl(Object* receiver,
                                               JSObject* obj,
                                               uint32_t key,
                                               FixedArray* parameter_map) {
@@ -1378,8 +1216,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
     }
   }
-  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
-      JSObject* obj,
+  static MaybeObject* SetLengthImpl(JSObject* obj,
                                     Object* length,
                                     FixedArray* parameter_map) {
     // TODO(mstarzinger): This was never implemented but will be used once we
@@ -1388,7 +1225,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
     return obj;
   }
-  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+  virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
@@ -1403,16 +1240,13 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
       if (arguments->IsDictionary()) {
         return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
       } else {
-        // It's difficult to access the version of DeleteCommon that is declared
-        // in the templatized super class, call the concrete implementation in
-        // the class for the most generalized ElementsKind subclass.
-        return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
+        return FastObjectElementsAccessor::DeleteCommon(obj, key);
       }
     }
     return obj->GetHeap()->true_value();
   }
-  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
                                        uint32_t from_start,
                                        FixedArrayBase* to,
                                        ElementsKind to_kind,
@@ -1470,7 +1304,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
       if (array->IsDictionary()) {
         return elements_accessors_[DICTIONARY_ELEMENTS];
       } else {
-        return elements_accessors_[FAST_HOLEY_ELEMENTS];
+        return elements_accessors_[FAST_ELEMENTS];
       }
     case EXTERNAL_BYTE_ARRAY_TYPE:
       return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
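ForArray above falls back to the FAST_ELEMENTS accessor for any non-dictionary backing store. The underlying mechanism is a flat table of singleton accessors indexed by ElementsKind; a standalone sketch of that dispatch shape, with the kind set and accessor contents reduced to stand-ins:

    #include <cassert>
    #include <cstdio>

    // One singleton accessor per ElementsKind, selected by indexing a flat
    // table. Kinds and accessors here are illustrative, not the real V8 set.
    enum ElementsKind { FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS,
                        DICTIONARY_ELEMENTS, kElementsKindCount };

    struct Accessor {
      const char* name;
      // ... virtual Get/SetLength/Delete/CopyElements in the real thing ...
    };

    static Accessor kAccessors[kElementsKindCount] = {
      { "FastElementsAccessor" },
      { "FastDoubleElementsAccessor" },
      { "DictionaryElementsAccessor" },
    };

    // Mirrors ForArray's fallback above: a dictionary backing store maps to
    // the dictionary accessor, anything else to the generic fast kind.
    Accessor* ForArray(bool is_dictionary) {
      return &kAccessors[is_dictionary ? DICTIONARY_ELEMENTS : FAST_ELEMENTS];
    }

    int main() {
      assert(ForArray(true) == &kAccessors[DICTIONARY_ELEMENTS]);
      std::printf("%s\n", ForArray(false)->name);  // FastElementsAccessor
    }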
@@ -1520,7 +1354,7 @@ void ElementsAccessor::TearDown() {
 template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
+MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
                                   ElementsKindTraits>::
     SetLengthImpl(JSObject* obj,
                   Object* length,

22
deps/v8/src/elements.h

@@ -28,7 +28,6 @@
 #ifndef V8_ELEMENTS_H_
 #define V8_ELEMENTS_H_
-#include "elements-kind.h"
 #include "objects.h"
 #include "heap.h"
 #include "isolate.h"
@@ -46,10 +45,6 @@ class ElementsAccessor {
   virtual ElementsKind kind() const = 0;
   const char* name() const { return name_; }
-  // Checks the elements of an object for consistency, asserting when a problem
-  // is found.
-  virtual void Validate(JSObject* obj) = 0;
   // Returns true if a holder contains an element with the specified key
   // without iterating up the prototype chain. The caller can optionally pass
   // in the backing store to use for the check, which must be compatible with
@@ -65,8 +60,7 @@ class ElementsAccessor {
   // can optionally pass in the backing store to use for the check, which must
   // be compatible with the ElementsKind of the ElementsAccessor. If
   // backing_store is NULL, the holder->elements() is used as the backing store.
-  MUST_USE_RESULT virtual MaybeObject* Get(
-      Object* receiver,
+  virtual MaybeObject* Get(Object* receiver,
                            JSObject* holder,
                            uint32_t key,
                            FixedArrayBase* backing_store = NULL) = 0;
@@ -76,7 +70,7 @@ class ElementsAccessor {
   // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
   // have non-deletable elements can only be shrunk to the size of highest
   // element that is non-deletable.
-  MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
+  virtual MaybeObject* SetLength(JSArray* holder,
                                  Object* new_length) = 0;
   // Modifies both the length and capacity of a JSArray, resizing the underlying
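The SetLength contract above follows ES5.1 section 15.4.5.2: shrinking an array's length deletes trailing elements but stops at the highest element that is non-deletable (non-configurable), and the final length ends up just past that element. A simplified standalone model of that rule, with invented types (not V8's implementation):

    #include <cassert>
    #include <cstdint>
    #include <map>

    struct Element { double value; bool deletable; };

    // Walk from the back, removing deletable elements at indices >=
    // new_length; a non-deletable element caps how far we can shrink.
    uint32_t SetLength(std::map<uint32_t, Element>& elems,
                       uint32_t new_length) {
      while (!elems.empty()) {
        std::map<uint32_t, Element>::reverse_iterator last = elems.rbegin();
        if (last->first < new_length) break;   // shrunk far enough
        if (!last->second.deletable) {
          return last->first + 1;              // blocked: stop just past it
        }
        elems.erase(last->first);
      }
      return new_length;
    }

    int main() {
      std::map<uint32_t, Element> a;
      a[0] = Element{1.0, true};
      a[5] = Element{2.0, false};  // non-deletable
      a[9] = Element{3.0, true};
      assert(SetLength(a, 2) == 6);            // cannot shrink past index 5
      assert(a.count(9) == 0 && a.count(5) == 1);
    }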
@@ -85,12 +79,12 @@ class ElementsAccessor {
   // elements. This method should only be called for array expansion OR by
   // runtime JavaScript code that use InternalArrays and don't care about
   // EcmaScript 5.1 semantics.
-  MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
                                             int capacity,
                                             int length) = 0;
   // Deletes an element in an object, returning a new elements backing store.
-  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
+  virtual MaybeObject* Delete(JSObject* holder,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
@@ -107,8 +101,7 @@ class ElementsAccessor {
   // the source JSObject or JSArray in source_holder. If the holder's backing
   // store is available, it can be passed in source and source_holder is
   // ignored.
-  MUST_USE_RESULT virtual MaybeObject* CopyElements(
-      JSObject* source_holder,
+  virtual MaybeObject* CopyElements(JSObject* source_holder,
                                     uint32_t source_start,
                                     FixedArrayBase* destination,
                                     ElementsKind destination_kind,
@@ -116,7 +109,7 @@ class ElementsAccessor {
                                     int copy_size,
                                     FixedArrayBase* source = NULL) = 0;
-  MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
+  MaybeObject* CopyElements(JSObject* from_holder,
                             FixedArrayBase* to,
                             ElementsKind to_kind,
                             FixedArrayBase* from = NULL) {
@@ -124,8 +117,7 @@ class ElementsAccessor {
                         kCopyToEndAndInitializeToHole, from);
   }
-  MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
-      Object* receiver,
+  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
                                                JSObject* holder,
                                                FixedArray* to,
                                                FixedArrayBase* from = NULL) = 0;

5
deps/v8/src/factory.cc

@@ -775,7 +775,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
       instance_size != JSObject::kHeaderSize) {
     Handle<Map> initial_map = NewMap(type,
                                      instance_size,
-                                     GetInitialFastElementsKind());
+                                     FAST_SMI_ONLY_ELEMENTS);
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -1013,11 +1013,10 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
 void Factory::EnsureCanContainElements(Handle<JSArray> array,
                                        Handle<FixedArrayBase> elements,
-                                       uint32_t length,
                                        EnsureElementsMode mode) {
   CALL_HEAP_FUNCTION_VOID(
       isolate(),
-      array->EnsureCanContainElements(*elements, length, mode));
+      array->EnsureCanContainElements(*elements, mode));
 }

13
deps/v8/src/factory.h

@@ -216,10 +216,9 @@ class Factory {
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);
-  Handle<Map> NewMap(
-      InstanceType type,
+  Handle<Map> NewMap(InstanceType type,
                      int instance_size,
-                     ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+                     ElementsKind elements_kind = FAST_ELEMENTS);
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -270,14 +269,13 @@ class Factory {
   Handle<JSModule> NewJSModule();
   // JS arrays are pretenured when allocated by the parser.
-  Handle<JSArray> NewJSArray(
-      int capacity,
-      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+  Handle<JSArray> NewJSArray(int capacity,
+                             ElementsKind elements_kind = FAST_ELEMENTS,
                              PretenureFlag pretenure = NOT_TENURED);
   Handle<JSArray> NewJSArrayWithElements(
       Handle<FixedArrayBase> elements,
-      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
+      ElementsKind elements_kind = FAST_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
   void SetElementsCapacityAndLength(Handle<JSArray> array,
@@ -289,7 +287,6 @@ class Factory {
   void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
   void EnsureCanContainElements(Handle<JSArray> array,
                                 Handle<FixedArrayBase> elements,
-                                uint32_t length,
                                 EnsureElementsMode mode);
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);

3
deps/v8/src/flag-definitions.h

@@ -150,7 +150,6 @@ DEFINE_implication(harmony, harmony_collections)
 DEFINE_implication(harmony_modules, harmony_scoping)
 // Flags for experimental implementation features.
-DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
 DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
 DEFINE_bool(clever_optimizations,
             true,
@@ -198,8 +197,6 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
 DEFINE_bool(array_bounds_checks_elimination, true,
             "perform array bounds checks elimination")
-DEFINE_bool(array_index_dehoisting, false,
-            "perform array index dehoisting")
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")

3
deps/v8/src/frames.h

@@ -211,9 +211,6 @@ class StackFrame BASE_EMBEDDED {
   virtual void SetCallerFp(Address caller_fp) = 0;
-  // Manually changes value of fp in this object.
-  void UpdateFp(Address fp) { state_.fp = fp; }
   Address* pc_address() const { return state_.pc_address; }
   // Get the id of this stack frame.

2
deps/v8/src/func-name-inferrer.h

@@ -88,8 +88,6 @@ class FuncNameInferrer : public ZoneObject {
   void Leave() {
     ASSERT(IsOpen());
     names_stack_.Rewind(entries_stack_.RemoveLast());
-    if (entries_stack_.is_empty())
-      funcs_to_infer_.Clear();
   }
  private:

3
deps/v8/src/globals.h

@@ -345,9 +345,6 @@ F FUNCTION_CAST(Address addr) {
 #define INLINE(header) inline __attribute__((always_inline)) header
 #define NO_INLINE(header) __attribute__((noinline)) header
 #endif
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define INLINE(header) __forceinline header
-#define NO_INLINE(header) header
 #else
 #define INLINE(header) inline header
 #define NO_INLINE(header) header

20
deps/v8/src/heap-inl.h

@@ -595,24 +595,12 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
 void ExternalStringTable::Verify() {
 #ifdef DEBUG
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    Object* obj = Object::cast(new_space_strings_[i]);
-    // TODO(yangguo): check that the object is indeed an external string.
-    ASSERT(heap_->InNewSpace(obj));
-    ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
-    if (obj->IsExternalAsciiString()) {
-      ExternalAsciiString* string = ExternalAsciiString::cast(obj);
-      ASSERT(String::IsAscii(string->GetChars(), string->length()));
-    }
+    ASSERT(heap_->InNewSpace(new_space_strings_[i]));
+    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    Object* obj = Object::cast(old_space_strings_[i]);
-    // TODO(yangguo): check that the object is indeed an external string.
-    ASSERT(!heap_->InNewSpace(obj));
-    ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
-    if (obj->IsExternalAsciiString()) {
-      ExternalAsciiString* string = ExternalAsciiString::cast(obj);
-      ASSERT(String::IsAscii(string->GetChars(), string->length()));
-    }
+    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
+    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
   }
 #endif
 }

70
deps/v8/src/heap.cc

@@ -171,9 +171,6 @@ Heap::Heap()
   global_contexts_list_ = NULL;
   mark_compact_collector_.heap_ = this;
   external_string_table_.heap_ = this;
-  // Put a dummy entry in the remembered pages so we can find the list the
-  // minidump even if there are no real unmapped pages.
-  RememberUnmappedPage(NULL, false);
 }
@@ -808,7 +805,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
     UpdateSurvivalRateTrend(start_new_space_size);
-  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
+  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
   if (high_survival_rate_during_scavenges &&
       IsStableOrIncreasingSurvivalTrend()) {
@@ -2023,7 +2020,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->init_prototype_transitions(undefined_value());
+  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
@@ -2162,15 +2159,15 @@ bool Heap::CreateInitialMaps() {
   // Fix the instance_descriptors for the existing maps.
   meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->init_prototype_transitions(undefined_value());
+  meta_map()->set_prototype_transitions(empty_fixed_array());
   fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->init_prototype_transitions(undefined_value());
+  fixed_array_map()->set_prototype_transitions(empty_fixed_array());
   oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
-  oddball_map()->init_prototype_transitions(undefined_value());
+  oddball_map()->set_prototype_transitions(empty_fixed_array());
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -2469,7 +2466,7 @@ bool Heap::CreateApiObjects() {
   // bottleneck to trap the Smi-only -> fast elements transition, and there
   // appears to be no benefit for optimize this case.
   Map* new_neander_map = Map::cast(obj);
-  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+  new_neander_map->set_elements_kind(FAST_ELEMENTS);
   set_neander_map(new_neander_map);
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@@ -3050,7 +3047,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
   }
   JSMessageObject* message = JSMessageObject::cast(result);
   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
-  message->initialize_elements();
   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
   message->set_type(type);
   message->set_arguments(arguments);
@@ -3327,8 +3323,6 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
     return Failure::OutOfMemoryException();
   }
-  ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
   Map* map = external_ascii_string_map();
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3754,7 +3748,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
   // Check the state of the object
   ASSERT(JSObject::cast(result)->HasFastProperties());
-  ASSERT(JSObject::cast(result)->HasFastObjectElements());
+  ASSERT(JSObject::cast(result)->HasFastElements());
   return result;
 }
@@ -3799,7 +3793,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
   map->set_inobject_properties(in_object_properties);
   map->set_unused_property_fields(in_object_properties);
   map->set_prototype(prototype);
-  ASSERT(map->has_fast_object_elements());
+  ASSERT(map->has_fast_elements());
   // If the function has only simple this property assignments add
   // field descriptors for these to the initial map as the object
@@ -3916,7 +3910,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
+  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+         JSObject::cast(obj)->HasFastElements());
   return obj;
 }
@@ -3961,9 +3956,6 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
     ArrayStorageAllocationMode mode,
     PretenureFlag pretenure) {
   ASSERT(capacity >= length);
-  if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
-    elements_kind = GetHoleyElementsKind(elements_kind);
-  }
   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
   JSArray* array;
   if (!maybe_array->To(&array)) return maybe_array;
@@ -3984,7 +3976,8 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
     }
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+    ASSERT(elements_kind == FAST_ELEMENTS ||
+           elements_kind == FAST_SMI_ONLY_ELEMENTS);
     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
       maybe_elms = AllocateUninitializedFixedArray(capacity);
     } else {
@@ -4010,7 +4003,6 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
   array->set_elements(elements);
   array->set_length(Smi::FromInt(elements->length()));
-  array->ValidateElements();
   return array;
 }
@@ -4495,16 +4487,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
-#ifdef DEBUG
-  if (FLAG_verify_heap) {
-    // Initialize string's content to ensure ASCII-ness (character range 0-127)
-    // as required when verifying the heap.
-    char* dest = SeqAsciiString::cast(result)->GetChars();
-    memset(dest, 0x0F, length * kCharSize);
-  }
-#endif  // DEBUG
   return result;
 }
@@ -4551,13 +4533,13 @@ MaybeObject* Heap::AllocateJSArray(
   Context* global_context = isolate()->context()->global_context();
   JSFunction* array_function = global_context->array_function();
   Map* map = array_function->initial_map();
-  Object* maybe_map_array = global_context->js_array_maps();
-  if (!maybe_map_array->IsUndefined()) {
-    Object* maybe_transitioned_map =
-        FixedArray::cast(maybe_map_array)->get(elements_kind);
-    if (!maybe_transitioned_map->IsUndefined()) {
-      map = Map::cast(maybe_transitioned_map);
-    }
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    map = Map::cast(global_context->double_js_array_map());
+  } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
+    map = Map::cast(global_context->object_js_array_map());
+  } else {
+    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
+    ASSERT(map == global_context->smi_js_array_map());
   }
   return AllocateJSObjectFromMap(map, pretenure);
@@ -4842,7 +4824,9 @@ MaybeObject* Heap::AllocateGlobalContext() {
   }
   Context* context = reinterpret_cast<Context*>(result);
   context->set_map_no_write_barrier(global_context_map());
-  context->set_js_array_maps(undefined_value());
+  context->set_smi_js_array_map(undefined_value());
+  context->set_double_js_array_map(undefined_value());
+  context->set_object_js_array_map(undefined_value());
   ASSERT(context->IsGlobalContext());
   ASSERT(result->IsContext());
   return result;
@@ -5826,6 +5810,16 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
 }
+intptr_t Heap::PromotedSpaceSize() {
+  return old_pointer_space_->Size()
+      + old_data_space_->Size()
+      + code_space_->Size()
+      + map_space_->Size()
+      + cell_space_->Size()
+      + lo_space_->Size();
+}
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
   return old_pointer_space_->SizeOfObjects()
       + old_data_space_->SizeOfObjects()
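The PromotedSpaceSize/PromotedSpaceSizeOfObjects pair restored above differ in what they count: Size() is roughly the memory a space has claimed, while SizeOfObjects() nets out bytes the sweeper has found to be free, so basing GC limits on Size() (as the rollback does) is the more conservative choice. A toy model of the distinction, with invented field names:

    #include <cassert>
    #include <cstdint>

    struct Space {
      intptr_t committed;  // bytes the space has claimed
      intptr_t wasted;     // bytes the sweeper has identified as free

      intptr_t Size() const { return committed; }
      intptr_t SizeOfObjects() const { return committed - wasted; }
    };

    // Sums Size() (not SizeOfObjects()), like the restored function above.
    intptr_t PromotedSpaceSize(const Space* spaces, int n) {
      intptr_t total = 0;
      for (int i = 0; i < n; i++) total += spaces[i].Size();
      return total;
    }

    int main() {
      Space spaces[2] = { {1000, 200}, {500, 0} };
      assert(PromotedSpaceSize(spaces, 2) == 1500);  // the 200 wasted count too
    }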

20
deps/v8/src/heap.h

@@ -621,7 +621,7 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateMap(
       InstanceType instance_type,
       int instance_size,
-      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+      ElementsKind elements_kind = FAST_ELEMENTS);
   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -1342,7 +1342,7 @@ class Heap {
                                             PretenureFlag pretenure);
   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+    return PromotedSpaceSize() + PromotedExternalMemorySize();
   }
   // True if we have reached the allocation limit in the old generation that
@@ -1363,6 +1363,19 @@ class Heap {
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+  // When we sweep lazily we initially guess that there is no garbage on the
+  // heap and set the limits for the next GC accordingly. As we sweep we find
+  // out that some of the pages contained garbage and we have to adjust
+  // downwards the size of the heap. This means the limits that control the
+  // timing of the next GC also need to be adjusted downwards.
+  void LowerOldGenLimits(intptr_t adjustment) {
+    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+  }
   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
     intptr_t limit =
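A small worked example of the restored LowerOldGenLimits above: the recorded old-gen size and both derived limits move down together, so the next GC is not scheduled against a stale, larger heap size. The limit formulas here are invented stand-ins for OldGenPromotionLimit/OldGenAllocationLimit, not V8's actual policy:

    #include <cassert>
    #include <cstdint>

    struct Limits {
      intptr_t size_at_last_gc;
      intptr_t promotion_limit;
      intptr_t allocation_limit;

      // Simplified stand-in formulas; the real ones also apply minimums.
      static intptr_t PromotionLimit(intptr_t size) { return size * 3; }
      static intptr_t AllocationLimit(intptr_t size) { return size * 2; }

      // Mirrors LowerOldGenLimits: shrink the base, recompute both limits.
      void Lower(intptr_t adjustment) {
        size_at_last_gc -= adjustment;
        promotion_limit = PromotionLimit(size_at_last_gc);
        allocation_limit = AllocationLimit(size_at_last_gc);
      }
    };

    int main() {
      Limits l = {100, Limits::PromotionLimit(100),
                  Limits::AllocationLimit(100)};
      l.Lower(30);                       // sweeper found 30 units of garbage
      assert(l.size_at_last_gc == 70);
      assert(l.promotion_limit == 210);  // limits track the smaller heap
      assert(l.allocation_limit == 140);
    }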
@@ -1455,7 +1468,7 @@ class Heap {
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;
-    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
     return false;
   }
@@ -1493,6 +1506,7 @@ class Heap {
   GCTracer* tracer() { return tracer_; }
   // Returns the size of objects residing in non new spaces.
+  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();
   double total_regexp_code_generated() { return total_regexp_code_generated_; }

33
deps/v8/src/hydrogen-instructions.cc

@@ -1603,7 +1603,6 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
   SetOperandAt(1, object);
   set_representation(Representation::Tagged());
   SetGVNFlag(kDependsOnMaps);
-  int map_transitions = 0;
   for (int i = 0;
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
@@ -1625,20 +1624,13 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
         case CONSTANT_FUNCTION:
           types_.Add(types->at(i));
           break;
-        case MAP_TRANSITION:
-          // We should just ignore these since they are not relevant to a load
-          // operation. This means we will deopt if we actually see this map
-          // from optimized code.
-          map_transitions++;
-          break;
         default:
           break;
       }
     }
   }
-  if (types_.length() + map_transitions == types->length() &&
-      FLAG_deoptimize_uncommon_cases) {
+  if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
    SetFlag(kUseGVN);
   } else {
     SetAllSideEffects();
@@ -1685,9 +1677,6 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
   stream->Add("[");
   key()->PrintNameTo(stream);
   stream->Add("]");
-  if (hole_check_mode_ == PERFORM_HOLE_CHECK) {
-    stream->Add(" check_hole");
-  }
 }
@@ -1739,7 +1728,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
       HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
           index_cache,
           key_load->key(),
-          OMIT_HOLE_CHECK);
+          HLoadKeyedFastElement::OMIT_HOLE_CHECK);
       HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
           object(), index);
       map_check->InsertBefore(this);
@@ -1787,11 +1776,8 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
       stream->Add("pixel");
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -1888,12 +1874,9 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
     case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -1908,13 +1891,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
 void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  ElementsKind from_kind = original_map()->elements_kind();
-  ElementsKind to_kind = transitioned_map()->elements_kind();
-  stream->Add(" %p [%s] -> %p [%s]",
-              *original_map(),
-              ElementsAccessor::ForKind(from_kind)->name(),
-              *transitioned_map(),
-              ElementsAccessor::ForKind(to_kind)->name());
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
 }

164
deps/v8/src/hydrogen-instructions.h

@@ -2083,21 +2083,28 @@ class HCheckMaps: public HTemplateInstruction<2> {
     HCheckMaps* check_map = new HCheckMaps(object, map);
     SmallMapList* map_set = check_map->map_set();
-    // Since transitioned elements maps of the initial map don't fail the map
-    // check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
+    // If the map to check has the untransitioned elements, it can be hoisted
+    // above TransitionElements instructions.
+    if (map->has_fast_smi_only_elements()) {
       check_map->ClearGVNFlag(kDependsOnElementsKind);
+    }
-    ElementsKind kind = map->elements_kind();
-    bool packed = IsFastPackedElementsKind(kind);
-    while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
-      kind = GetNextMoreGeneralFastElementsKind(kind, packed);
-      Map* transitioned_map =
-          map->LookupElementsTransitionMap(kind, NULL);
-      if (transitioned_map) {
-        map_set->Add(Handle<Map>(transitioned_map));
-      }
-    };
+    Map* transitioned_fast_element_map =
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      map_set->Add(Handle<Map>(transitioned_fast_element_map));
+    }
+    Map* transitioned_double_map =
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      map_set->Add(Handle<Map>(transitioned_double_map));
+    }
     map_set->Sort();
     return check_map;
   }
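The effect of the restored block above is that the compiled map check passes for the initial map and for its known elements-kind transition targets, so the check can be hoisted above a TransitionElements instruction without becoming stale. A standalone sketch of checking against such a sorted map set, with maps reduced to integers:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    typedef int Map;  // stand-in; real maps are heap objects

    struct CheckMaps {
      std::vector<Map> map_set;  // kept sorted, as map_set->Sort() does above

      void Add(Map m) {
        map_set.push_back(m);
        std::sort(map_set.begin(), map_set.end());
      }
      // The check passes if the object's actual map is any member.
      bool Pass(Map actual) const {
        return std::binary_search(map_set.begin(), map_set.end(), actual);
      }
    };

    int main() {
      CheckMaps check;
      check.Add(42);  // the map being checked (say, FAST_SMI_ONLY_ELEMENTS)
      check.Add(17);  // its FAST_ELEMENTS transition target
      check.Add(99);  // its FAST_DOUBLE_ELEMENTS transition target
      assert(check.Pass(42) && check.Pass(99));
      assert(!check.Pass(7));  // an unrelated map still deopts
    }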
@@ -3939,28 +3946,15 @@ class HLoadFunctionPrototype: public HUnaryOperation {
   virtual bool DataEquals(HValue* other) { return true; }
 };
-class ArrayInstructionInterface {
- public:
-  virtual HValue* GetKey() = 0;
-  virtual void SetKey(HValue* key) = 0;
-  virtual void SetIndexOffset(uint32_t index_offset) = 0;
-  virtual bool IsDehoisted() = 0;
-  virtual void SetDehoisted(bool is_dehoisted) = 0;
-  virtual ~ArrayInstructionInterface() { };
-};
-enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
-class HLoadKeyedFastElement
-    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+class HLoadKeyedFastElement: public HTemplateInstruction<2> {
  public:
+  enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
   HLoadKeyedFastElement(HValue* obj,
                         HValue* key,
                         HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
-      : hole_check_mode_(hole_check_mode),
-        index_offset_(0),
-        is_dehoisted_(false) {
+      : hole_check_mode_(hole_check_mode) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     set_representation(Representation::Tagged());
@@ -3970,12 +3964,6 @@ class HLoadKeyedFastElement
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
-  uint32_t index_offset() { return index_offset_; }
-  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
-  HValue* GetKey() { return key(); }
-  void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
-  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
@@ -3994,28 +3982,17 @@ class HLoadKeyedFastElement
   virtual bool DataEquals(HValue* other) {
     if (!other->IsLoadKeyedFastElement()) return false;
     HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
-    if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
-      return false;
     return hole_check_mode_ == other_load->hole_check_mode_;
   }
  private:
   HoleCheckMode hole_check_mode_;
-  uint32_t index_offset_;
-  bool is_dehoisted_;
 };
-class HLoadKeyedFastDoubleElement
-    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
  public:
-  HLoadKeyedFastDoubleElement(
-      HValue* elements,
-      HValue* key,
-      HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
-      : index_offset_(0),
-        is_dehoisted_(false),
-        hole_check_mode_(hole_check_mode) {
+  HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     set_representation(Representation::Double());
@@ -4025,12 +4002,6 @@ class HLoadKeyedFastDoubleElement
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
-  uint32_t index_offset() { return index_offset_; }
-  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
-  HValue* GetKey() { return key(); }
-  void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
-  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
@@ -4039,38 +4010,21 @@ class HLoadKeyedFastDoubleElement
         : Representation::Integer32();
   }
-  bool RequiresHoleCheck() {
-    return hole_check_mode_ == PERFORM_HOLE_CHECK;
-  }
   virtual void PrintDataTo(StringStream* stream);
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
  protected:
-  virtual bool DataEquals(HValue* other) {
-    if (!other->IsLoadKeyedFastDoubleElement()) return false;
-    HLoadKeyedFastDoubleElement* other_load =
-        HLoadKeyedFastDoubleElement::cast(other);
-    return hole_check_mode_ == other_load->hole_check_mode_;
-  }
- private:
-  uint32_t index_offset_;
-  bool is_dehoisted_;
-  HoleCheckMode hole_check_mode_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
-class HLoadKeyedSpecializedArrayElement
-    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
+class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
  public:
   HLoadKeyedSpecializedArrayElement(HValue* external_elements,
                                     HValue* key,
                                     ElementsKind elements_kind)
-      : elements_kind_(elements_kind),
-        index_offset_(0),
-        is_dehoisted_(false) {
+      : elements_kind_(elements_kind) {
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@@ -4098,12 +4052,6 @@ class HLoadKeyedSpecializedArrayElement
   HValue* external_pointer() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   ElementsKind elements_kind() const { return elements_kind_; }
-  uint32_t index_offset() { return index_offset_; }
-  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
-  HValue* GetKey() { return key(); }
-  void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
-  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   virtual Range* InferRange(Zone* zone);
@@ -4119,8 +4067,6 @@ class HLoadKeyedSpecializedArrayElement
  private:
   ElementsKind elements_kind_;
-  uint32_t index_offset_;
-  bool is_dehoisted_;
 };
@@ -4242,12 +4188,11 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
 };
-class HStoreKeyedFastElement
-    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedFastElement: public HTemplateInstruction<3> {
  public:
   HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
                          ElementsKind elements_kind = FAST_ELEMENTS)
-      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+      : elements_kind_(elements_kind) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4265,14 +4210,8 @@ class HStoreKeyedFastElement
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   bool value_is_smi() {
-    return IsFastSmiElementsKind(elements_kind_);
+    return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
   }
-  uint32_t index_offset() { return index_offset_; }
-  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
-  HValue* GetKey() { return key(); }
-  void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
-  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   bool NeedsWriteBarrier() {
     if (value_is_smi()) {
@@ -4288,18 +4227,14 @@ class HStoreKeyedFastElement
  private:
   ElementsKind elements_kind_;
-  uint32_t index_offset_;
-  bool is_dehoisted_;
 };
-class HStoreKeyedFastDoubleElement
-    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
  public:
   HStoreKeyedFastDoubleElement(HValue* elements,
                                HValue* key,
-                               HValue* val)
-      : index_offset_(0), is_dehoisted_(false) {
+                               HValue* val) {
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4319,12 +4254,6 @@ class HStoreKeyedFastDoubleElement
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
-  uint32_t index_offset() { return index_offset_; }
-  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
-  HValue* GetKey() { return key(); }
-  void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
-  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
@@ -4335,21 +4264,16 @@ class HStoreKeyedFastDoubleElement
   virtual void PrintDataTo(StringStream* stream);
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
- private:
-  uint32_t index_offset_;
-  bool is_dehoisted_;
 };
-class HStoreKeyedSpecializedArrayElement
-    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
  public:
   HStoreKeyedSpecializedArrayElement(HValue* external_elements,
                                      HValue* key,
                                      HValue* val,
                                      ElementsKind elements_kind)
-      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+      : elements_kind_(elements_kind) {
     SetGVNFlag(kChangesSpecializedArrayElements);
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
@@ -4377,19 +4301,11 @@ class HStoreKeyedSpecializedArrayElement
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   ElementsKind elements_kind() const { return elements_kind_; }
-  uint32_t index_offset() { return index_offset_; }
-  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
-  HValue* GetKey() { return key(); }
-  void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
-  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
  private:
   ElementsKind elements_kind_;
-  uint32_t index_offset_;
-  bool is_dehoisted_;
 };
@@ -4436,19 +4352,9 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
         transitioned_map_(transitioned_map) {
     SetOperandAt(0, object);
     SetFlag(kUseGVN);
-    // Don't set GVN DependOn flags here. That would defeat GVN's detection of
-    // congruent HTransitionElementsKind instructions. Instruction hoisting
-    // handles HTransitionElementsKind instruction specially, explicitly adding
-    // DependsOn flags during its dependency calculations.
     SetGVNFlag(kChangesElementsKind);
-    if (original_map->has_fast_double_elements()) {
       SetGVNFlag(kChangesElementsPointer);
       SetGVNFlag(kChangesNewSpacePromotion);
-    }
-    if (transitioned_map->has_fast_double_elements()) {
-      SetGVNFlag(kChangesElementsPointer);
-      SetGVNFlag(kChangesNewSpacePromotion);
-    }
     set_representation(Representation::Tagged());
   }
@@ -4686,7 +4592,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
   HValue* context() { return OperandAt(0); }
   ElementsKind boilerplate_elements_kind() const {
     if (!boilerplate_object_->IsJSObject()) {
-      return TERMINAL_FAST_ELEMENTS_KIND;
+      return FAST_ELEMENTS;
     }
     return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
   }

313
deps/v8/src/hydrogen.cc

@@ -1709,23 +1709,23 @@ void HGlobalValueNumberer::ProcessLoopBlock(
     bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
     if (instr->IsTransitionElementsKind()) {
       // It's possible to hoist transitions out of a loop as long as the
-      // hoisting wouldn't move the transition past an instruction that has a
-      // DependsOn flag for anything it changes.
+      // hoisting wouldn't move the transition past a DependsOn of one of it's
+      // changes or any instructions that might change an objects map or
+      // elements contents.
+      GVNFlagSet changes = instr->ChangesFlags();
       GVNFlagSet hoist_depends_blockers =
-          HValue::ConvertChangesToDependsFlags(instr->ChangesFlags());
-
-      // In addition, the transition must not be hoisted above elements kind
-      // changes, or if the transition is destructive to the elements buffer,
-      // changes to array pointer or array contents.
-      GVNFlagSet hoist_change_blockers;
-      hoist_change_blockers.Add(kChangesElementsKind);
+          HValue::ConvertChangesToDependsFlags(changes);
+      // In addition to not hoisting transitions above other instructions that
+      // change dependencies that the transition changes, it must not be
+      // hoisted above map changes and stores to an elements backing store
+      // that the transition might change.
+      GVNFlagSet hoist_change_blockers = changes;
+      hoist_change_blockers.Add(kChangesMaps);
       HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
       if (trans->original_map()->has_fast_double_elements()) {
-        hoist_change_blockers.Add(kChangesElementsPointer);
         hoist_change_blockers.Add(kChangesDoubleArrayElements);
       }
       if (trans->transitioned_map()->has_fast_double_elements()) {
-        hoist_change_blockers.Add(kChangesElementsPointer);
         hoist_change_blockers.Add(kChangesArrayElements);
       }
       if (FLAG_trace_gvn) {
@@ -2758,7 +2758,6 @@ HGraph* HGraphBuilder::CreateGraph() {
   sce.Process();
 
   graph()->EliminateRedundantBoundsChecks();
-  graph()->DehoistSimpleArrayIndexComputations();
 
   return graph();
 }
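A note on the hunk above: in Hydrogen every kChangesX GVN flag has a matching kDependsOnX flag, which is what lets ProcessLoopBlock turn an instruction's ChangesFlags() into the set of depends-on flags that would block hoisting. A minimal sketch of that conversion, assuming a paired bit layout (the constants here are illustrative, not V8's actual values):

    #include <cstdint>

    // Illustrative layout: "changes" flags occupy the low kNumberOfFlags
    // bits, the paired "depends-on" flags the next kNumberOfFlags bits.
    const int kNumberOfFlags = 16;
    typedef uint32_t GVNFlagSet;

    // Under that pairing, converting a changes-set to the matching
    // depends-set is a single shift.
    inline GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet changes) {
      return changes << kNumberOfFlags;
    }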
@@ -3017,6 +3016,7 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
     HBoundsCheck* check = HBoundsCheck::cast(i);
     check->ReplaceAllUsesWith(check->index());
 
+    isolate()->counters()->array_bounds_checks_seen()->Increment();
     if (!FLAG_array_bounds_checks_elimination) continue;
 
     int32_t offset;
@@ -3035,8 +3035,10 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
       *data_p = bb_data_list;
     } else if (data->OffsetIsCovered(offset)) {
       check->DeleteAndReplaceWith(NULL);
+      isolate()->counters()->array_bounds_checks_removed()->Increment();
     } else if (data->BasicBlock() == bb) {
       data->CoverCheck(check, offset);
+      isolate()->counters()->array_bounds_checks_removed()->Increment();
     } else {
       int32_t new_lower_offset = offset < data->LowerOffset()
           ? offset
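For context on the counter hunks above: the elimination pass keeps, per checked index and length, the interval of constant offsets already protected by an earlier check; a later check whose offset falls inside that interval is the one counted as removed. A self-contained sketch of that coverage rule (types hypothetical, not V8's):

    #include <cstdint>

    // Coverage rule used by the pass, reduced to its core: once offsets in
    // [lower, upper] have been checked against a given length, any further
    // check at an offset inside the interval is redundant.
    struct CheckInterval {
      int32_t lower;
      int32_t upper;
      bool OffsetIsCovered(int32_t offset) const {
        return offset >= lower && offset <= upper;
      }
      void CoverCheck(int32_t offset) {  // widen the protected interval
        if (offset < lower) lower = offset;
        if (offset > upper) upper = offset;
      }
    };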
@@ -3080,93 +3082,6 @@ void HGraph::EliminateRedundantBoundsChecks() {
 }
 
-static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
-  HValue* index = array_operation->GetKey();
-
-  HConstant* constant;
-  HValue* subexpression;
-  int32_t sign;
-  if (index->IsAdd()) {
-    sign = 1;
-    HAdd* add = HAdd::cast(index);
-    if (add->left()->IsConstant()) {
-      subexpression = add->right();
-      constant = HConstant::cast(add->left());
-    } else if (add->right()->IsConstant()) {
-      subexpression = add->left();
-      constant = HConstant::cast(add->right());
-    } else {
-      return;
-    }
-  } else if (index->IsSub()) {
-    sign = -1;
-    HSub* sub = HSub::cast(index);
-    if (sub->left()->IsConstant()) {
-      subexpression = sub->right();
-      constant = HConstant::cast(sub->left());
-    } else if (sub->right()->IsConstant()) {
-      subexpression = sub->left();
-      constant = HConstant::cast(sub->right());
-    } else {
-      return;
-    }
-  } else {
-    return;
-  }
-
-  if (!constant->HasInteger32Value()) return;
-  int32_t value = constant->Integer32Value() * sign;
-  // We limit offset values to 30 bits because we want to avoid the risk of
-  // overflows when the offset is added to the object header size.
-  if (value >= 1 << 30 || value < 0) return;
-  array_operation->SetKey(subexpression);
-  if (index->HasNoUses()) {
-    index->DeleteAndReplaceWith(NULL);
-  }
-  ASSERT(value >= 0);
-  array_operation->SetIndexOffset(static_cast<uint32_t>(value));
-  array_operation->SetDehoisted(true);
-}
-
-void HGraph::DehoistSimpleArrayIndexComputations() {
-  if (!FLAG_array_index_dehoisting) return;
-
-  HPhase phase("H_Dehoist index computations", this);
-  for (int i = 0; i < blocks()->length(); ++i) {
-    for (HInstruction* instr = blocks()->at(i)->first();
-         instr != NULL;
-         instr = instr->next()) {
-      ArrayInstructionInterface* array_instruction = NULL;
-      if (instr->IsLoadKeyedFastElement()) {
-        HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else if (instr->IsLoadKeyedFastDoubleElement()) {
-        HLoadKeyedFastDoubleElement* op =
-            HLoadKeyedFastDoubleElement::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
-        HLoadKeyedSpecializedArrayElement* op =
-            HLoadKeyedSpecializedArrayElement::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else if (instr->IsStoreKeyedFastElement()) {
-        HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else if (instr->IsStoreKeyedFastDoubleElement()) {
-        HStoreKeyedFastDoubleElement* op =
-            HStoreKeyedFastDoubleElement::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
-        HStoreKeyedSpecializedArrayElement* op =
-            HStoreKeyedSpecializedArrayElement::cast(instr);
-        array_instruction = static_cast<ArrayInstructionInterface*>(op);
-      } else {
-        continue;
-      }
-      DehoistArrayIndex(array_instruction);
-    }
-  }
-}
-
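The two functions deleted above implemented index dehoisting: when a keyed access indexes with subexpression + constant, the constant is peeled off into the instruction's index_offset so that, for example, a load of a[i + 4] keys on i with a static offset of 4 baked into the addressing mode. A simplified sketch of the pattern match, using stand-in types rather than the hydrogen classes:

    #include <cstdint>

    // Stand-ins for the hydrogen types; only what the sketch needs.
    struct IndexExpr {
      bool is_add;       // index has the shape "other + constant"
      int32_t constant;  // the constant operand's value
      IndexExpr* other;  // the non-constant subexpression
    };

    // Mirrors the deleted DehoistArrayIndex: peel the constant off the key
    // and keep it as a static offset on the array operation.
    bool Dehoist(IndexExpr* index, IndexExpr** key_out, uint32_t* offset_out) {
      if (!index->is_add) return false;  // (the real pass also handled Sub)
      int32_t value = index->constant;
      // Offsets are capped at 30 bits so that adding the object header size
      // can never overflow -- the same rationale as the deleted comment.
      if (value < 0 || value >= (1 << 30)) return false;
      *key_out = index->other;
      *offset_out = static_cast<uint32_t>(value);
      return true;
    }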
 HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
   ASSERT(current_block() != NULL);
   current_block()->AddInstruction(instr);
@@ -3966,7 +3881,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
       new(zone()) HLoadKeyedFastElement(
           environment()->ExpressionStackAt(2),  // Enum cache.
           environment()->ExpressionStackAt(0),  // Iteration index.
-          OMIT_HOLE_CHECK));
+          HLoadKeyedFastElement::OMIT_HOLE_CHECK));
 
   // Check if the expected map still matches that of the enumerable.
   // If not just deoptimize.
@@ -4257,7 +4172,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
       elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
     if (boilerplate->HasFastDoubleElements()) {
       *total_size += FixedDoubleArray::SizeFor(elements->length());
-    } else if (boilerplate->HasFastObjectElements()) {
+    } else if (boilerplate->HasFastElements()) {
       Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       int length = elements->length();
       for (int i = 0; i < length; i++) {
@@ -4464,13 +4379,11 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
                                           Representation::Integer32()));
 
     switch (boilerplate_elements_kind) {
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
         // Smi-only arrays need a smi check.
         AddInstruction(new(zone()) HCheckSmi(value));
         // Fall through.
       case FAST_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
         AddInstruction(new(zone()) HStoreKeyedFastElement(
             elements,
             key,
@@ -4478,7 +4391,6 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
            boilerplate_elements_kind));
        break;
      case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
        AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
                                                                key,
                                                                value));
@@ -5236,12 +5148,9 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
       break;
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -5266,16 +5175,13 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
     ASSERT(val != NULL);
     switch (elements_kind) {
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
         return new(zone()) HStoreKeyedFastDoubleElement(
             elements, checked_key, val);
-      case FAST_SMI_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
         // Smi-only arrays need a smi check.
         AddInstruction(new(zone()) HCheckSmi(val));
         // Fall through.
       case FAST_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
         return new(zone()) HStoreKeyedFastElement(
             elements, checked_key, val, elements_kind);
       default:
@@ -5284,13 +5190,10 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
     }
   }
   // It's an element load (!is_store).
-  HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ?
-      OMIT_HOLE_CHECK :
-      PERFORM_HOLE_CHECK;
-  if (IsFastDoubleElementsKind(elements_kind)) {
-    return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
-  } else {  // Smi or Object elements.
-    return new(zone()) HLoadKeyedFastElement(elements, checked_key, mode);
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+  } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+    return new(zone()) HLoadKeyedFastElement(elements, checked_key);
   }
 }
@@ -5298,30 +5201,15 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
 HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
                                                            HValue* key,
                                                            HValue* val,
-                                                           HValue* dependency,
                                                            Handle<Map> map,
                                                            bool is_store) {
-  HInstruction* mapcheck =
-      AddInstruction(new(zone()) HCheckMaps(object, map, dependency));
-  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
-  // on a HElementsTransition instruction. The flag can also be removed if the
-  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
-  // ElementsKind transitions. Finally, the dependency can be removed for stores
-  // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
-  // generated store code.
-  if (dependency ||
-      (map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
-      (map->elements_kind() == FAST_ELEMENTS && is_store)) {
-    mapcheck->ClearGVNFlag(kDependsOnElementsKind);
-  }
-  bool fast_smi_only_elements = map->has_fast_smi_elements();
-  bool fast_elements = map->has_fast_object_elements();
+  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMaps(object, map));
+  bool fast_smi_only_elements = map->has_fast_smi_only_elements();
+  bool fast_elements = map->has_fast_elements();
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
   if (is_store && (fast_elements || fast_smi_only_elements)) {
-    HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
-        elements, isolate()->factory()->fixed_array_map());
-    check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
-    AddInstruction(check_cow_map);
+    AddInstruction(new(zone()) HCheckMaps(
+        elements, isolate()->factory()->fixed_array_map()));
   }
   HInstruction* length = NULL;
   HInstruction* checked_key = NULL;
@@ -5374,8 +5262,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
   for (int i = 0; i < maps->length(); ++i) {
     Handle<Map> map = maps->at(i);
     ElementsKind elements_kind = map->elements_kind();
-    if (IsFastElementsKind(elements_kind) &&
-        elements_kind != GetInitialFastElementsKind()) {
+    if (elements_kind == FAST_DOUBLE_ELEMENTS ||
+        elements_kind == FAST_ELEMENTS) {
       possible_transitioned_maps.Add(map);
     }
   }
@@ -5389,17 +5277,12 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
 
   int num_untransitionable_maps = 0;
   Handle<Map> untransitionable_map;
-  HTransitionElementsKind* transition = NULL;
   for (int i = 0; i < maps->length(); ++i) {
     Handle<Map> map = maps->at(i);
     ASSERT(map->IsMap());
     if (!transition_target.at(i).is_null()) {
-      ASSERT(Map::IsValidElementsTransition(
-          map->elements_kind(),
-          transition_target.at(i)->elements_kind()));
-      transition = new(zone()) HTransitionElementsKind(
-          object, map, transition_target.at(i));
-      AddInstruction(transition);
+      AddInstruction(new(zone()) HTransitionElementsKind(
+          object, map, transition_target.at(i)));
     } else {
       type_todo[map->elements_kind()] = true;
       if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
@@ -5419,7 +5302,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
         : BuildLoadKeyedGeneric(object, key));
   } else {
     instr = AddInstruction(BuildMonomorphicElementAccess(
-        object, key, val, transition, untransitionable_map, is_store));
+        object, key, val, untransitionable_map, is_store));
   }
   *has_side_effects |= instr->HasObservableSideEffects();
   instr->set_position(position);
@@ -5436,18 +5319,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
   HLoadExternalArrayPointer* external_elements = NULL;
   HInstruction* checked_key = NULL;
 
-  // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
-  // are handled before external arrays.
-  STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
+  // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
+  // arrays.
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
   STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
   STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
 
   for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
        elements_kind <= LAST_ELEMENTS_KIND;
        elements_kind = ElementsKind(elements_kind + 1)) {
-    // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
-    // code that's executed for all external array cases.
+    // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
+    // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
+    // that's executed for all external array cases.
     STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
                   LAST_ELEMENTS_KIND);
     if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -5469,8 +5354,10 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
       set_current_block(if_true);
       HInstruction* access;
-      if (IsFastElementsKind(elements_kind)) {
-        if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_ELEMENTS ||
+          elements_kind == FAST_DOUBLE_ELEMENTS) {
+        if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
           AddInstruction(new(zone()) HCheckMaps(
               elements, isolate()->factory()->fixed_array_map(),
               elements_kind_branch));
@@ -5557,7 +5444,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
         : BuildLoadKeyedGeneric(obj, key);
   } else {
     AddInstruction(new(zone()) HCheckNonSmi(obj));
-    instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
+    instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
   }
 } else if (expr->GetReceiverTypes() != NULL &&
            !expr->GetReceiverTypes()->is_empty()) {
@@ -5775,39 +5662,6 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
 }
 
-class FunctionSorter {
- public:
-  FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
-  FunctionSorter(int index, int ticks, int ast_length, int src_length)
-      : index_(index),
-        ticks_(ticks),
-        ast_length_(ast_length),
-        src_length_(src_length) { }
-
-  int index() const { return index_; }
-  int ticks() const { return ticks_; }
-  int ast_length() const { return ast_length_; }
-  int src_length() const { return src_length_; }
-
- private:
-  int index_;
-  int ticks_;
-  int ast_length_;
-  int src_length_;
-};
-
-static int CompareHotness(void const* a, void const* b) {
-  FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
-  FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
-  int diff = function1->ticks() - function2->ticks();
-  if (diff != 0) return -diff;
-  diff = function1->ast_length() - function2->ast_length();
-  if (diff != 0) return diff;
-  return function1->src_length() - function2->src_length();
-}
-
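The deleted FunctionSorter/CompareHotness pair ordered polymorphic call targets hottest-first before inlining: descending profiler ticks, ties broken by smaller AST and then smaller source. A sketch of the same ordering outside V8 (struct and field names are stand-ins):

    #include <cstdlib>

    struct Candidate {
      int ticks;       // profiler ticks: more is hotter
      int ast_length;  // smaller bodies win ties
      int src_length;  // then shorter source
    };

    // Same ordering as the deleted CompareHotness: descending ticks, then
    // ascending AST size, then ascending source size.
    static int CompareHotness(const void* a, const void* b) {
      const Candidate* f1 = static_cast<const Candidate*>(a);
      const Candidate* f2 = static_cast<const Candidate*>(b);
      int diff = f1->ticks - f2->ticks;
      if (diff != 0) return -diff;
      diff = f1->ast_length - f2->ast_length;
      if (diff != 0) return diff;
      return f1->src_length - f2->src_length;
    }

    // Usage, as the deleted builder code did:
    //   qsort(order, ordered_functions, sizeof(order[0]), &CompareHotness);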
 void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
                                                HValue* receiver,
                                                SmallMapList* types,
@@ -5816,35 +5670,17 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
   int argument_count = expr->arguments()->length() + 1;  // Includes receiver.
+  int count = 0;
   HBasicBlock* join = NULL;
-  FunctionSorter order[kMaxCallPolymorphism];
-  int ordered_functions = 0;
-  for (int i = 0;
-       i < types->length() && ordered_functions < kMaxCallPolymorphism;
-       ++i) {
+  for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
     Handle<Map> map = types->at(i);
     if (expr->ComputeTarget(map, name)) {
-      order[ordered_functions++] =
-          FunctionSorter(i,
-                         expr->target()->shared()->profiler_ticks(),
-                         InliningAstSize(expr->target()),
-                         expr->target()->shared()->SourceSize());
-    }
-  }
-
-  qsort(reinterpret_cast<void*>(&order[0]),
-        ordered_functions,
-        sizeof(order[0]),
-        &CompareHotness);
-
-  for (int fn = 0; fn < ordered_functions; ++fn) {
-    int i = order[fn].index();
-    Handle<Map> map = types->at(i);
-    if (fn == 0) {
-      // Only needed once.
-      AddInstruction(new(zone()) HCheckNonSmi(receiver));
-      join = graph()->CreateBasicBlock();
-    }
+      if (count == 0) {
+        // Only needed once.
+        AddInstruction(new(zone()) HCheckNonSmi(receiver));
+        join = graph()->CreateBasicBlock();
+      }
+      ++count;
       HBasicBlock* if_true = graph()->CreateBasicBlock();
       HBasicBlock* if_false = graph()->CreateBasicBlock();
       HCompareMap* compare =
@@ -5852,15 +5688,10 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
       current_block()->Finish(compare);
 
       set_current_block(if_true);
-    expr->ComputeTarget(map, name);
       AddCheckConstantFunction(expr, receiver, map, false);
       if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
-      Handle<JSFunction> caller = info()->closure();
-      SmartArrayPointer<char> caller_name =
-          caller->shared()->DebugName()->ToCString();
-      PrintF("Trying to inline the polymorphic call to %s from %s\n",
-             *name->ToCString(),
-             *caller_name);
+        PrintF("Trying to inline the polymorphic call to %s\n",
+               *name->ToCString());
       }
       if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
         // Trying to inline will signal that we should bailout from the
@@ -5878,11 +5709,12 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
       if (current_block() != NULL) current_block()->Goto(join);
       set_current_block(if_false);
     }
+  }
 
   // Finish up.  Unconditionally deoptimize if we've handled all the maps we
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
-  if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
+  if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
     current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
     HValue* context = environment()->LookupContext();
@@ -5931,11 +5763,14 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
 }
 
-static const int kNotInlinable = 1000000000;
-
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
-  if (!FLAG_use_inlining) return kNotInlinable;
+bool HGraphBuilder::TryInline(CallKind call_kind,
+                              Handle<JSFunction> target,
+                              ZoneList<Expression*>* arguments,
+                              HValue* receiver,
+                              int ast_id,
+                              int return_id,
+                              ReturnHandlingFlag return_handling) {
+  if (!FLAG_use_inlining) return false;
 
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
@@ -5947,43 +5782,25 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
   if (target_shared->SourceSize() >
       Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
     TraceInline(target, caller, "target text too big");
-    return kNotInlinable;
+    return false;
   }
 
   // Target must be inlineable.
   if (!target->IsInlineable()) {
     TraceInline(target, caller, "target not inlineable");
-    return kNotInlinable;
+    return false;
   }
   if (target_shared->dont_inline() || target_shared->dont_optimize()) {
     TraceInline(target, caller, "target contains unsupported syntax [early]");
-    return kNotInlinable;
+    return false;
   }
 
   int nodes_added = target_shared->ast_node_count();
-  return nodes_added;
-}
-
-bool HGraphBuilder::TryInline(CallKind call_kind,
-                              Handle<JSFunction> target,
-                              ZoneList<Expression*>* arguments,
-                              HValue* receiver,
-                              int ast_id,
-                              int return_id,
-                              ReturnHandlingFlag return_handling) {
-  int nodes_added = InliningAstSize(target);
-  if (nodes_added == kNotInlinable) return false;
-
-  Handle<JSFunction> caller = info()->closure();
-
   if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
     TraceInline(target, caller, "target AST is too large [early]");
     return false;
   }
 
-  Handle<SharedFunctionInfo> target_shared(target->shared());
-
 #if !defined(V8_TARGET_ARCH_IA32)
   // Target must be able to use caller's context.
   CompilationInfo* outer_info = info();

3
deps/v8/src/hydrogen.h

@@ -267,7 +267,6 @@ class HGraph: public ZoneObject {
   void AssignDominators();
   void ReplaceCheckedValues();
   void EliminateRedundantBoundsChecks();
-  void DehoistSimpleArrayIndexComputations();
   void PropagateDeoptimizingMark();
 
   // Returns false if there are phi-uses of the arguments-object
@@ -1011,7 +1010,6 @@ class HGraphBuilder: public AstVisitor {
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
-  int InliningAstSize(Handle<JSFunction> target);
   bool TryInline(CallKind call_kind,
                  Handle<JSFunction> target,
                  ZoneList<Expression*>* arguments,
@@ -1093,7 +1091,6 @@ class HGraphBuilder: public AstVisitor {
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
-                                              HValue* dependency,
                                               Handle<Map> map,
                                               bool is_store);
   HValue* HandlePolymorphicElementAccess(HValue* object,

3
deps/v8/src/ia32/assembler-ia32.h

@@ -640,9 +640,6 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
-  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
-  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
   // ---------------------------------------------------------------------------
   // Code generation

9
deps/v8/src/ia32/builtins-ia32.cc

@@ -900,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -1003,8 +1003,7 @@ static void AllocateJSArray(MacroAssembler* masm,
   ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
   ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
 
-  __ LoadInitialArrayMap(array_function, scratch,
-                         elements_array, fill_with_hole);
+  __ LoadInitialArrayMap(array_function, scratch, elements_array);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
@@ -1275,11 +1274,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ jmp(&prepare_generic_code_call);
 
   __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
   __ mov(ebx, Operand(esp, 0));
   __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
   __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS,
+      FAST_SMI_ONLY_ELEMENTS,
       FAST_ELEMENTS,
       edi,
       eax,

38
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -3822,24 +3822,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ IncrementCounter(counters->regexp_entry_native(), 1);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  static const int kRegExpExecuteArguments = 9;
+  static const int kRegExpExecuteArguments = 8;
   __ EnterApiExitFrame(kRegExpExecuteArguments);
 
-  // Argument 9: Pass current isolate address.
-  __ mov(Operand(esp, 8 * kPointerSize),
+  // Argument 8: Pass current isolate address.
+  __ mov(Operand(esp, 7 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
 
-  // Argument 8: Indicate that this is a direct call from JavaScript.
-  __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
+  // Argument 7: Indicate that this is a direct call from JavaScript.
+  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
 
-  // Argument 7: Start (high end) of backtracking stack memory area.
+  // Argument 6: Start (high end) of backtracking stack memory area.
   __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
   __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ mov(Operand(esp, 6 * kPointerSize), esi);
-
-  // Argument 6: Set the number of capture registers to zero to force global
-  // regexps to behave as non-global. This does not affect non-global regexps.
-  __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
+  __ mov(Operand(esp, 5 * kPointerSize), esi);
 
   // Argument 5: static offsets vector buffer.
   __ mov(Operand(esp, 4 * kPointerSize),
@@ -3902,9 +3898,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check the result.
   Label success;
-  __ cmp(eax, 1);
-  // We expect exactly one result since we force the called regexp to behave
-  // as non-global.
+  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
   __ j(equal, &success);
   Label failure;
   __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
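For orientation in the regexp hunk above: the stub passes its C arguments by writing esp-relative slots inside the API exit frame, so dropping the old "number of captures" argument shifts every higher slot down by one. A sketch of the cdecl slot arithmetic being relied on (constants illustrative):

    #include <cstdint>

    const int kPointerSize = 4;  // ia32

    // In a cdecl call, 0-based argument n lives at esp + n * kPointerSize
    // once EnterApiExitFrame has reserved the slots. Writing the isolate
    // pointer as "argument 8" therefore means slot 7, exactly as in
    // "__ mov(Operand(esp, 7 * kPointerSize), ...)" above.
    inline uint32_t ArgumentSlotOffset(int n) {
      return static_cast<uint32_t>(n) * kPointerSize;
    }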
@@ -7063,8 +7057,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
   { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
-  // and ElementsTransitionGenerator::GenerateSmiToDouble
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
   { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@@ -7336,9 +7330,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ CheckFastElements(edi, &double_elements);
 
-  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
   __ JumpIfSmi(eax, &smi_element);
-  __ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
+  __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
 
   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -7360,7 +7354,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   __ pop(edx);
   __ jmp(&slow_elements);
 
-  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
   __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@@ -7373,15 +7367,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
                  OMIT_SMI_CHECK);
   __ ret(0);
 
-  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
-  // and value is Smi.
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
   __ bind(&smi_element);
   __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
   __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
                       FixedArrayBase::kHeaderSize), eax);
   __ ret(0);
 
-  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
   __ bind(&double_elements);
 
   __ push(edx);

4
deps/v8/src/ia32/codegen-ia32.cc

@@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
 }
 
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- eax    : value

31
deps/v8/src/ia32/debug-ia32.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,12 +91,10 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
-// All debug break stubs support padding for LiveEdit.
-const bool Debug::FramePaddingLayout::kIsSupported = true;
-
 #define __ ACCESS_MASM(masm)
 
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs,
@@ -105,13 +103,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
-      __ push(Immediate(Smi::FromInt(
-          Debug::FramePaddingLayout::kPaddingValue)));
-    }
-    __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
-
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as a smi causing it to be untouched by GC.
@@ -143,10 +134,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
     CEntryStub ceb(1);
     __ CallStub(&ceb);
 
-    // Automatically find register that could be used after register restore.
-    // We need one register for padding skip instructions.
-    Register unused_reg = { -1 };
-
     // Restore the register values containing object pointers from the
     // expression stack.
     for (int i = kNumJSCallerSaved; --i >= 0;) {
@@ -155,29 +142,15 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       if (FLAG_debug_code) {
         __ Set(reg, Immediate(kDebugZapValue));
       }
-      bool taken = reg.code() == esi.code();
       if ((object_regs & (1 << r)) != 0) {
         __ pop(reg);
-        taken = true;
       }
       if ((non_object_regs & (1 << r)) != 0) {
         __ pop(reg);
         __ SmiUntag(reg);
-        taken = true;
-      }
-      if (!taken) {
-        unused_reg = reg;
       }
     }
 
-    ASSERT(unused_reg.code() != -1);
-
-    // Read current padding counter and skip corresponding number of words.
-    __ pop(unused_reg);
-    // We divide stored value by 2 (untagging) and multiply it by word's size.
-    STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
-    __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
-
     // Get rid of the internal frame.
   }
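The padding machinery removed above relied on a small smi trick worth spelling out: the counter is pushed smi-tagged (value << 1), so scaling it by half a pointer size in a single lea yields value * kPointerSize, the exact byte count to skip. A sketch of that arithmetic (assuming ia32's 1-bit smi tag, as the deleted STATIC_ASSERT did):

    #include <cstdint>

    const int kPointerSize = 4;  // ia32

    inline int32_t SmiTag(int32_t value) { return value << 1; }  // 1-bit tag

    // lea(esp, Operand(esp, reg, times_half_pointer_size, 0)) advances esp
    // by tagged * (kPointerSize / 2) == value * kPointerSize: the untag
    // (divide by 2) and the word scaling (multiply by 4) fold into one step.
    inline int32_t PaddingBytes(int32_t tagged_count) {
      return tagged_count * (kPointerSize / 2);
    }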

16
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -1649,8 +1649,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_constant_fast_elements =
-      IsFastObjectElementsKind(constant_elements_kind);
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1661,7 +1660,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Heap* heap = isolate()->heap();
   if (has_constant_fast_elements &&
       constant_elements_values->map() == heap->fixed_cow_array_map()) {
-    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
     // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
     FastCloneShallowArrayStub stub(
@@ -1673,9 +1672,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
           FLAG_smi_only_arrays);
-    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
     // change, so it's possible to specialize the stub in advance.
     FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1703,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
     }
     VisitForAccumulatorValue(subexpr);
 
-    if (IsFastObjectElementsKind(constant_elements_kind)) {
-      // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
-      // cannot transition and don't need to call the runtime stub.
+    if (constant_elements_kind == FAST_ELEMENTS) {
+      // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
+      // transition and don't need to call the runtime stub.
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
       __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));

38
deps/v8/src/ia32/ic-ia32.cc

@@ -889,25 +889,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                  &non_double_value,
                  DONT_DO_SMI_CHECK);
 
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_DOUBLE_ELEMENTS,
                                          ebx,
                                          edi,
                                          &slow);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
   __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
 
   __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          ebx,
                                          edi,
                                          &slow);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
   __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
 
@@ -1622,7 +1622,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   // Must return the modified receiver in eax.
   if (!FLAG_trace_elements_transitions) {
     Label fail;
-    ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
     __ mov(eax, edx);
     __ Ret();
     __ bind(&fail);
@@ -1727,12 +1727,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+    PatchInlinedSmiCode(address());
   }
 }
 
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1753,18 +1753,14 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
             address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. Enabling means switching from a short
-  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
-  // reverse operation of that.
+  // Patch with a short conditional jump. There must be a
+  // short jump-if-carry/not-carry at this position.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
-         ? (*jmp_address == Assembler::kJncShortOpcode ||
-            *jmp_address == Assembler::kJcShortOpcode)
-         : (*jmp_address == Assembler::kJnzShortOpcode ||
-            *jmp_address == Assembler::kJzShortOpcode));
-  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
-      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
-      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+         *jmp_address == Assembler::kJcShortOpcode);
+  Condition cc = *jmp_address == Assembler::kJncShortOpcode
+      ? not_zero
+      : zero;
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
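The PatchInlinedSmiCode hunk above works because a short Jcc on ia32 is a single opcode byte, 0x70 | condition_code, followed by a rel8 displacement; flipping a carry test to a zero test rewrites one byte and leaves the target intact. A sketch under that encoding (the condition-code values are the architectural ones):

    #include <cstdint>

    // Short-form Jcc: opcode byte 0x70 | cc, then a rel8 displacement.
    const uint8_t kJccShortPrefix = 0x70;
    const uint8_t kCarry = 2, kNotCarry = 3, kZero = 4, kNotZero = 5;

    // Rewrite a jc/jnc into jz/jnz in place. Only the opcode byte changes;
    // the displacement byte (and so the jump target) is preserved.
    inline void PatchShortJcc(uint8_t* jmp_address) {
      uint8_t cc =
          (*jmp_address == (kJccShortPrefix | kNotCarry)) ? kNotZero : kZero;
      *jmp_address = static_cast<uint8_t>(kJccShortPrefix | cc);
    }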

163
deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -2274,35 +2274,40 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
-  bool need_generic = instr->hydrogen()->need_generic();
-  if (map_count == 0 && !need_generic) {
-    DeoptimizeIf(no_condition, instr->environment());
-    return;
-  }
   Handle<String> name = instr->hydrogen()->name();
-  Label done;
-  for (int i = 0; i < map_count; ++i) {
-    bool last = (i == map_count - 1);
-    Handle<Map> map = instr->hydrogen()->types()->at(i);
-    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (last && !need_generic) {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    } else {
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
+    __ mov(ecx, name);
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
       Label next;
+      __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
-  }
-  if (need_generic) {
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ j(not_equal, &generic, Label::kNear);
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ jmp(&done, Label::kNear);
+      __ bind(&generic);
       __ mov(ecx, name);
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
     }
     __ bind(&done);
+  }
 }
@@ -2377,10 +2382,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
     __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
     __ and_(temp, Map::kElementsKindMask);
     __ shr(temp, Map::kElementsKindShift);
-    __ cmp(temp, GetInitialFastElementsKind());
-    __ j(less, &fail, Label::kNear);
-    __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
-    __ j(less_equal, &ok, Label::kNear);
+    __ cmp(temp, FAST_ELEMENTS);
+    __ j(equal, &ok, Label::kNear);
     __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
     __ j(less, &fail, Label::kNear);
     __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
@@ -2423,11 +2426,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   // Load the result.
   __ mov(result,
-         BuildFastArrayOperand(instr->elements(),
-                               instr->key(),
+         BuildFastArrayOperand(instr->elements(), instr->key(),
                                FAST_ELEMENTS,
-                               FixedArray::kHeaderSize - kHeapObjectTag,
-                               instr->additional_index()));
+                               FixedArray::kHeaderSize - kHeapObjectTag));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2441,24 +2442,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result = ToDoubleRegister(instr->result());
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
   int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
       sizeof(kHoleNanLower32);
   Operand hole_check_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(),
       FAST_DOUBLE_ELEMENTS,
-        offset,
-        instr->additional_index());
+      offset);
   __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
   DeoptimizeIf(equal, instr->environment());
-  }
 
   Operand double_load_operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
-      instr->additional_index());
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   __ movdbl(result, double_load_operand);
 }
@@ -2467,8 +2462,7 @@ Operand LCodeGen::BuildFastArrayOperand(
     LOperand* elements_pointer,
     LOperand* key,
     ElementsKind elements_kind,
-    uint32_t offset,
-    uint32_t additional_index) {
+    uint32_t offset) {
   Register elements_pointer_reg = ToRegister(elements_pointer);
   int shift_size = ElementsKindToShiftSize(elements_kind);
   if (key->IsConstantOperand()) {
@@ -2477,14 +2471,10 @@ Operand LCodeGen::BuildFastArrayOperand(
       Abort("array index constant value too big");
     }
     return Operand(elements_pointer_reg,
-                   ((constant_value + additional_index) << shift_size)
-                       + offset);
+                   constant_value * (1 << shift_size) + offset);
   } else {
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
-    return Operand(elements_pointer_reg,
-                   ToRegister(key),
-                   scale_factor,
-                   offset + (additional_index << shift_size));
+    return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
   }
 }
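With additional_index gone from BuildFastArrayOperand, the effective address reduces to base + (key << shift) + header_offset, with constant keys folded into the displacement at compile time. A sketch of that arithmetic (shift sizes assumed: 2 for tagged elements on ia32, 3 for doubles, matching what ElementsKindToShiftSize would return):

    #include <cstdint>

    // shift_size is log2 of the element size: 2 for tagged elements on
    // ia32, 3 for doubles (assumed values for this sketch).
    inline uintptr_t ElementAddress(uintptr_t elements_base, uint32_t key,
                                    int shift_size, uint32_t header_offset) {
      return elements_base + (key << shift_size) + header_offset;
    }

    // A constant key folds entirely into the displacement, which is the
    // "constant_value * (1 << shift_size) + offset" operand in the diff.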
@@ -2493,10 +2483,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(),
-                                        elements_kind,
-                                        0,
-                                        instr->additional_index()));
+                                        instr->key(), elements_kind, 0));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
     __ movss(result, operand);
@@ -2532,12 +2519,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@ -2941,13 +2925,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmp(output_reg, 0x80000000u); __ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment()); DeoptimizeIf(equal, instr->environment());
} else { } else {
Label negative_sign;
Label done; Label done;
// Deoptimize on unordered. // Deoptimize on negative numbers.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register. __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch); __ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr->environment()); DeoptimizeIf(below, instr->environment());
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero. // Check for negative zero.
@ -2963,21 +2945,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Use truncating instruction (OK because input is positive). // Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg)); __ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint. // Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u); __ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment()); DeoptimizeIf(equal, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here
__ bind(&negative_sign);
// Truncate, then compare and compensate
__ cvttsd2si(output_reg, Operand(input_reg));
__ cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr->environment());
__ bind(&done); __ bind(&done);
} }
} }
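
The deleted negative-input path of DoMathFloor implemented floor by truncating toward zero and then compensating. A plain C++ sketch of that algorithm (illustrative, not V8 code):

    #include <cstdint>

    // cvttsd2si rounds toward zero, so a negative non-integer truncates
    // to a value one above its floor and must be decremented.
    int32_t FloorViaTruncation(double input) {
      int32_t truncated = static_cast<int32_t>(input);  // rounds toward zero
      if (input >= 0.0 || static_cast<double>(truncated) == input) {
        return truncated;  // non-negative or exact: truncation is the floor
      }
      return truncated - 1;  // negative non-integer: compensate
    }

After the rollback only non-negative inputs reach the truncation, so no compensation is needed and negative inputs deoptimize instead.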
@@ -3436,10 +3407,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(),
-                                        elements_kind,
-                                        0,
-                                        instr->additional_index()));
+                                        instr->key(), elements_kind, 0));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
     __ movss(operand, xmm0);
@@ -3463,12 +3431,9 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
-      case FAST_SMI_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
      case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
-      case FAST_HOLEY_SMI_ELEMENTS:
-      case FAST_HOLEY_ELEMENTS:
-      case FAST_HOLEY_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3483,21 +3448,31 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
-  Operand operand = BuildFastArrayOperand(
-      instr->object(),
-      instr->key(),
-      FAST_ELEMENTS,
-      FixedArray::kHeaderSize - kHeapObjectTag,
-      instr->additional_index());
-  __ mov(operand, value);
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(FieldOperand(elements, offset), value);
+  } else {
+    __ mov(FieldOperand(elements,
+                        key,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize),
+           value);
+  }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    ASSERT(!instr->key()->IsConstantOperand());
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ lea(key, operand);
+    __ lea(key,
+           FieldOperand(elements,
+                        key,
+                        times_pointer_size,
+                        FixedArray::kHeaderSize));
     __ RecordWrite(elements,
                    key,
                    value,
@@ -3525,11 +3500,8 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
 
   Operand double_store_operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
-      instr->additional_index());
+      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
   __ movdbl(double_store_operand, value);
 }
 
@@ -3560,23 +3532,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   __ j(not_equal, &not_applicable);
   __ mov(new_map_reg, to_map);
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
     Register object_reg = ToRegister(instr->object());
     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
     // Write barrier.
     ASSERT_NE(instr->temp_reg(), NULL);
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         ToRegister(instr->temp_reg()), kDontSaveFPRegs);
-  } else if (IsFastSmiElementsKind(from_kind) &&
-             IsFastDoubleElementsKind(to_kind)) {
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+             to_kind == FAST_DOUBLE_ELEMENTS) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(edx));
     ASSERT(new_map_reg.is(ebx));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
-  } else if (IsFastDoubleElementsKind(from_kind) &&
-             IsFastObjectElementsKind(to_kind)) {
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(edx));
     ASSERT(new_map_reg.is(ebx));
@@ -4436,9 +4407,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-  if (CanTransitionToMoreGeneralFastElementsKind(
-          boilerplate_elements_kind, true)) {
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
     __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
     // Load the map's "bit field 2". We only need the first byte,
@@ -4600,9 +4570,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   // Deopt if the literal boilerplate ElementsKind is of a type different than
   // the expected one. The check isn't necessary if the boilerplate has already
-  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-  if (CanTransitionToMoreGeneralFastElementsKind(
-          boilerplate_elements_kind, true)) {
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
     __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
     __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
     // Load the map's "bit field 2". We only need the first byte,

3  deps/v8/src/ia32/lithium-codegen-ia32.h

@@ -242,8 +242,7 @@ class LCodeGen BASE_EMBEDDED {
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                 LOperand* key,
                                 ElementsKind elements_kind,
-                                uint32_t offset,
-                                uint32_t additional_index = 0);
+                                uint32_t offset);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);

8  deps/v8/src/ia32/lithium-ia32.cc

@@ -1990,7 +1990,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
+                                                    key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
@@ -2092,9 +2093,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  ElementsKind from_kind = instr->original_map()->elements_kind();
-  ElementsKind to_kind = instr->transitioned_map()->elements_kind();
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
     LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LOperand* temp_reg = TempRegister();

12  deps/v8/src/ia32/lithium-ia32.h

@@ -1238,13 +1238,13 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+  LLoadKeyedFastDoubleElement(LOperand* elements,
+                              LOperand* key) {
     inputs_[0] = elements;
     inputs_[1] = key;
   }
@@ -1255,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+                                    LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1275,7 +1275,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
@@ -1776,7 +1775,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
@@ -1799,7 +1797,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
@@ -1825,7 +1822,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

80  deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -382,12 +382,10 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleyElementValue);
+       Map::kMaximumBitField2FastElementValue);
   j(above, fail, distance);
 }
 
@@ -395,26 +393,23 @@ void MacroAssembler::CheckFastElements(Register map,
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Label* fail,
                                              Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleySmiElementValue);
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
   j(below_equal, fail, distance);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleyElementValue);
+       Map::kMaximumBitField2FastElementValue);
   j(above, fail, distance);
 }
 
 
-void MacroAssembler::CheckFastSmiElements(Register map,
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
                                               Label* fail,
                                               Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastHoleySmiElementValue);
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
   j(above, fail, distance);
 }
@@ -498,18 +493,24 @@ void MacroAssembler::CompareMap(Register obj,
                                 CompareMapMode mode) {
   cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    ElementsKind kind = map->elements_kind();
-    if (IsFastElementsKind(kind)) {
-      bool packed = IsFastPackedElementsKind(kind);
-      Map* current_map = *map;
-      while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
-        kind = GetNextMoreGeneralFastElementsKind(kind, packed);
-        current_map = current_map->LookupElementsTransitionMap(kind, NULL);
-        if (!current_map) break;
-        j(equal, early_success, Label::kNear);
-        cmp(FieldOperand(obj, HeapObject::kMapOffset),
-            Handle<Map>(current_map));
-      }
-    }
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_fast_element_map));
+    }
+
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      j(equal, early_success, Label::kNear);
+      cmp(FieldOperand(obj, HeapObject::kMapOffset),
+          Handle<Map>(transitioned_double_map));
+    }
   }
 }
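
The rolled-back CompareMap probes at most two concrete transitioned maps rather than walking the generalized elements-kind lattice. A compact sketch of the resulting match logic, with hypothetical pointer parameters standing in for the map handles (illustrative only):

    // Null means no cached transition exists for that target kind.
    bool MapWouldMatch(const void* object_map,
                       const void* expected_map,
                       const void* to_fast_elements,
                       const void* to_fast_double_elements) {
      if (object_map == expected_map) return true;
      if (to_fast_elements && object_map == to_fast_elements) return true;
      if (to_fast_double_elements && object_map == to_fast_double_elements) {
        return true;
      }
      return false;
    }

The generated code performs the same comparisons with cmp and j(equal, ...) against the early_success label.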
@@ -2160,38 +2161,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
   mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check that the function's map is the same as the expected cached map.
-  mov(scratch, Operand(scratch,
-                       Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-
-  size_t offset = expected_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  cmp(map_in_out, FieldOperand(scratch, offset));
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
   j(not_equal, no_map_match);
 
   // Use the transitioned cached map.
-  offset = transitioned_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  mov(map_in_out, FieldOperand(scratch, offset));
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
 }
 
 
 void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch,
-    Register map_out, bool can_have_holes) {
+    Register function_in, Register scratch, Register map_out) {
   ASSERT(!function_in.is(map_out));
   Label done;
   mov(map_out, FieldOperand(function_in,
                             JSFunction::kPrototypeOrInitialMapOffset));
   if (!FLAG_smi_only_arrays) {
-    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        kind,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  } else if (can_have_holes) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        FAST_HOLEY_SMI_ELEMENTS,
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
                                         map_out,
                                         scratch,
                                         &done);
@@ -2576,7 +2566,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(NULL, address, size + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.

5  deps/v8/src/ia32/macro-assembler-ia32.h

@@ -235,8 +235,7 @@ class MacroAssembler: public Assembler {
   // Load the initial map for new Arrays from a JSFunction.
   void LoadInitialArrayMap(Register function_in,
                            Register scratch,
-                           Register map_out,
-                           bool can_have_holes);
+                           Register map_out);
 
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
@@ -358,7 +357,7 @@ class MacroAssembler: public Assembler {
   // Check if a map for a JSObject indicates that the object has fast smi only
   // elements. Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
+  void CheckFastSmiOnlyElements(Register map,
                                 Label* fail,
                                 Label::Distance distance = Label::kFar);

148  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,30 +42,28 @@ namespace internal {
 #ifndef V8_INTERPRETED_REGEXP
 /*
  * This assembler uses the following register assignment convention
- * - edx : Current character. Must be loaded using LoadCurrentCharacter
- *         before using any of the dispatch methods. Temporarily stores the
- *         index of capture start after a matching pass for a global regexp.
- * - edi : Current position in input, as negative offset from end of string.
+ * - edx : current character. Must be loaded using LoadCurrentCharacter
+ *         before using any of the dispatch methods.
+ * - edi : current position in input, as negative offset from end of string.
  *         Please notice that this is the byte offset, not the character offset!
  * - esi : end of input (points to byte after last character in input).
- * - ebp : Frame pointer. Used to access arguments, local variables and
+ * - ebp : frame pointer. Used to access arguments, local variables and
  *         RegExp registers.
- * - esp : Points to tip of C stack.
- * - ecx : Points to tip of backtrack stack
+ * - esp : points to tip of C stack.
+ * - ecx : points to tip of backtrack stack
  *
  * The registers eax and ebx are free to use for computations.
 *
 * Each call to a public method should retain this convention.
 * The stack will have the following structure:
- *       - Isolate* isolate     (address of the current isolate)
+ *       - Isolate* isolate     (Address of the current isolate)
 *       - direct_call          (if 1, direct call from JavaScript code, if 0
 *                               call through the runtime system)
- *       - stack_area_base      (high end of the memory area to use as
+ *       - stack_area_base      (High end of the memory area to use as
 *                               backtracking stack)
- *       - capture array size   (may fit multiple sets of matches)
 *       - int* capture_array   (int[num_saved_registers_], for output).
- *       - end of input         (address of end of string)
- *       - start of input       (address of first character in string)
+ *       - end of input         (Address of end of string)
+ *       - start of input       (Address of first character in string)
 *       - start index          (character index of start)
 *       - String* input_string (location of a handle containing the string)
 *       --- frame alignment (if applicable) ---
@@ -74,10 +72,9 @@ namespace internal {
 *       - backup of caller esi
 *       - backup of caller edi
 *       - backup of caller ebx
- *       - success counter      (only for global regexps to count matches).
 *       - Offset of location before start of input (effectively character
 *         position -1). Used to initialize capture registers to a non-position.
- *       - register 0  ebp[-4]  (only positions must be stored in the first
+ *       - register 0  ebp[-4]  (Only positions must be stored in the first
 *       - register 1  ebp[-8]   num_saved_registers_ registers)
 *       - ...
 *
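
The block comment above is the authoritative description of the generated frame. As a reading aid, a hypothetical struct laying out the argument slots it names, in the rolled-back order (field names invented for illustration; the actual offsets are the k* constants in regexp-macro-assembler-ia32.h):

    struct RegExpEntryArgsSketch {   // above ebp, pushed by the caller
      void*       input_string;      // String* handle location
      int         start_index;       // character index of start
      const void* input_start;       // address of first character
      const void* input_end;         // address of end of string
      int*        capture_array;     // int[num_saved_registers_], for output
      void*       stack_area_base;   // high end of backtracking stack
      int         direct_call;       // 1 = direct call from JavaScript code
      void*       isolate;           // Isolate*
    };

Note the slots the rollback removes: there is no capture-array-size argument and no success counter among the locals.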
@@ -709,16 +706,13 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
 
 void RegExpMacroAssemblerIA32::Fail() {
-  STATIC_ASSERT(FAILURE == 0);  // Return value for failure is zero.
-  if (!global()) {
-    __ Set(eax, Immediate(FAILURE));
-  }
+  ASSERT(FAILURE == 0);  // Return value for failure is zero.
+  __ Set(eax, Immediate(0));
   __ jmp(&exit_label_);
 }
 
 
 Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
-  Label return_eax;
   // Finalize code - write the entry point code now we know how many
   // registers we need.
@@ -737,7 +731,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   __ push(esi);
   __ push(edi);
   __ push(ebx);  // Callee-save on MacOS.
-  __ push(Immediate(0));  // Number of successful matches in a global regexp.
   __ push(Immediate(0));  // Make room for "input start - 1" constant.
 
   // Check if we have space on the stack for registers.
@@ -757,13 +750,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
   __ mov(eax, EXCEPTION);
-  __ jmp(&return_eax);
+  __ jmp(&exit_label_);
 
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(ebx);
   __ or_(eax, eax);
   // If returned value is non-zero, we exit with the returned value as result.
-  __ j(not_zero, &return_eax);
+  __ j(not_zero, &exit_label_);
 
   __ bind(&stack_ok);
   // Load start index for later use.
@@ -790,55 +783,41 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   // position registers.
   __ mov(Operand(ebp, kInputStartMinusOne), eax);
 
-#ifdef WIN32
-  // Ensure that we write to each stack page, in order. Skipping a page
-  // on Windows can cause segmentation faults. Assuming page size is 4k.
-  const int kPageSize = 4096;
-  const int kRegistersPerPage = kPageSize / kPointerSize;
-  for (int i = num_saved_registers_ + kRegistersPerPage - 1;
-       i < num_registers_;
-       i += kRegistersPerPage) {
-    __ mov(register_location(i), eax);  // One write every page.
-  }
-#endif  // WIN32
-
-  Label load_char_start_regexp, start_regexp;
-  // Load newline if index is at start, previous character otherwise.
-  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
-  __ j(not_equal, &load_char_start_regexp, Label::kNear);
-  __ mov(current_character(), '\n');
-  __ jmp(&start_regexp, Label::kNear);
-
-  // Global regexp restarts matching here.
-  __ bind(&load_char_start_regexp);
-  // Load previous char as initial value of current character register.
-  LoadCurrentCharacterUnchecked(-1, 1);
-  __ bind(&start_regexp);
-
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
     // page (a problem on Windows).
-    if (num_saved_registers_ > 8) {
-      __ mov(ecx, kRegisterZero);
-      Label init_loop;
-      __ bind(&init_loop);
-      __ mov(Operand(ebp, ecx, times_1, 0), eax);
-      __ sub(ecx, Immediate(kPointerSize));
-      __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
-      __ j(greater, &init_loop);
-    } else {  // Unroll the loop.
-      for (int i = 0; i < num_saved_registers_; i++) {
-        __ mov(register_location(i), eax);
-      }
-    }
+    __ mov(ecx, kRegisterZero);
+    Label init_loop;
+    __ bind(&init_loop);
+    __ mov(Operand(ebp, ecx, times_1, +0), eax);
+    __ sub(ecx, Immediate(kPointerSize));
+    __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
+    __ j(greater, &init_loop);
   }
 
+  // Ensure that we have written to each stack page, in order. Skipping a page
+  // on Windows can cause segmentation faults. Assuming page size is 4k.
+  const int kPageSize = 4096;
+  const int kRegistersPerPage = kPageSize / kPointerSize;
+  for (int i = num_saved_registers_ + kRegistersPerPage - 1;
+       i < num_registers_;
+       i += kRegistersPerPage) {
+    __ mov(register_location(i), eax);  // One write every page.
+  }
+
   // Initialize backtrack stack pointer.
   __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
+
+  // Load previous char as initial value of current-character.
+  Label at_start;
+  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+  __ j(equal, &at_start);
+  LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
+  __ bind(&at_start);
+  __ mov(current_character(), '\n');
+  __ jmp(&start_label_);
 
   // Exit code:
   if (success_label_.is_linked()) {
@@ -857,10 +836,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
     }
     for (int i = 0; i < num_saved_registers_; i++) {
       __ mov(eax, register_location(i));
-      if (i == 0 && global()) {
-        // Keep capture start in edx for the zero-length check later.
-        __ mov(edx, eax);
-      }
       // Convert to index from start of string, not end.
       __ add(eax, ecx);
       if (mode_ == UC16) {
@@ -869,54 +844,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
         __ mov(Operand(ebx, i * kPointerSize), eax);
       }
     }
-
-    if (global()) {
-      // Restart matching if the regular expression is flagged as global.
-      // Increment success counter.
-      __ inc(Operand(ebp, kSuccessfulCaptures));
-      // Capture results have been stored, so the number of remaining global
-      // output registers is reduced by the number of stored captures.
-      __ mov(ecx, Operand(ebp, kNumOutputRegisters));
-      __ sub(ecx, Immediate(num_saved_registers_));
-      // Check whether we have enough room for another set of capture results.
-      __ cmp(ecx, Immediate(num_saved_registers_));
-      __ j(less, &exit_label_);
-
-      __ mov(Operand(ebp, kNumOutputRegisters), ecx);
-      // Advance the location for output.
-      __ add(Operand(ebp, kRegisterOutput),
-             Immediate(num_saved_registers_ * kPointerSize));
-
-      // Prepare eax to initialize registers with its value in the next run.
-      __ mov(eax, Operand(ebp, kInputStartMinusOne));
-
-      // Special case for zero-length matches.
-      // edx: capture start index
-      __ cmp(edi, edx);
-      // Not a zero-length match, restart.
-      __ j(not_equal, &load_char_start_regexp);
-      // edi (offset from the end) is zero if we already reached the end.
-      __ test(edi, edi);
-      __ j(zero, &exit_label_, Label::kNear);
-      // Advance current position after a zero-length match.
-      if (mode_ == UC16) {
-        __ add(edi, Immediate(2));
-      } else {
-        __ inc(edi);
-      }
-      __ jmp(&load_char_start_regexp);
-    } else {
-      __ mov(eax, Immediate(SUCCESS));
-    }
+    __ mov(eax, Immediate(SUCCESS));
   }
-
+  // Exit and return eax
   __ bind(&exit_label_);
-  if (global()) {
-    // Return the number of successful captures.
-    __ mov(eax, Operand(ebp, kSuccessfulCaptures));
-  }
-
-  __ bind(&return_eax);
   // Skip esp past regexp registers.
   __ lea(esp, Operand(ebp, kBackup_ebx));
   // Restore callee-save registers.
@@ -946,7 +877,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   __ or_(eax, eax);
   // If returning non-zero, we should end execution with the given
   // result as return value.
-  __ j(not_zero, &return_eax);
+  __ j(not_zero, &exit_label_);
 
   __ pop(edi);
   __ pop(backtrack_stackpointer());
@@ -993,7 +924,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
   __ bind(&exit_with_exception);
   // Exit with Result EXCEPTION(-1) to signal thrown exception.
   __ mov(eax, EXCEPTION);
-  __ jmp(&return_eax);
+  __ jmp(&exit_label_);
 }
 
   CodeDesc code_desc;
@@ -1112,9 +1043,8 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
 }
 
 
-bool RegExpMacroAssemblerIA32::Succeed() {
+void RegExpMacroAssemblerIA32::Succeed() {
   __ jmp(&success_label_);
-  return global();
 }

13  deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -111,7 +111,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   virtual void ReadStackPointerFromRegister(int reg);
   virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
-  virtual bool Succeed();
+  virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
@@ -135,11 +135,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   static const int kInputStart = kStartIndex + kPointerSize;
   static const int kInputEnd = kInputStart + kPointerSize;
   static const int kRegisterOutput = kInputEnd + kPointerSize;
-  // For the case of global regular expression, we have room to store at least
-  // one set of capture results. For the case of non-global regexp, we ignore
-  // this value.
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
   // Below the frame pointer - local stack variables.
@@ -148,8 +144,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
   static const int kBackup_esi = kFramePointer - kPointerSize;
   static const int kBackup_edi = kBackup_esi - kPointerSize;
   static const int kBackup_ebx = kBackup_edi - kPointerSize;
-  static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
-  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
 
   // First register address. Following registers are below it on the stack.
   static const int kRegisterZero = kInputStartMinusOne - kPointerSize;

8  deps/v8/src/ia32/simulator-ia32.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,12 +40,12 @@ namespace internal {
 typedef int (*regexp_matcher)(String*, int, const byte*,
-                              const byte*, int*, int, Address, int, Isolate*);
+                              const byte*, int*, Address, int, Isolate*);
 
 // Call the generated regexp code directly. The code at the entry address should
 // expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
-  (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+  (FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \

27  deps/v8/src/ia32/stub-cache-ia32.cc

@@ -1462,31 +1462,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ jmp(&fast_object);
       // In case of fast smi-only, convert to fast object, otherwise bail out.
       __ bind(&not_fast_object);
-      __ CheckFastSmiElements(ebx, &call_builtin);
+      __ CheckFastSmiOnlyElements(ebx, &call_builtin);
       // edi: elements array
       // edx: receiver
       // ebx: map
-      Label try_holey_map;
-      __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+      __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                              FAST_ELEMENTS,
                                              ebx,
                                              edi,
-                                             &try_holey_map);
-      ElementsTransitionGenerator::
-          GenerateMapChangeElementsTransition(masm());
-      // Restore edi.
-      __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-      __ jmp(&fast_object);
-
-      __ bind(&try_holey_map);
-      __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
-                                             FAST_HOLEY_ELEMENTS,
-                                             ebx,
-                                             edi,
                                              &call_builtin);
-      ElementsTransitionGenerator::
-          GenerateMapChangeElementsTransition(masm());
+      ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
       // Restore edi.
       __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
       __ bind(&fast_object);
@@ -3833,7 +3818,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // Check that the key is a smi or a heap number convertible to a smi.
   GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
 
-  if (IsFastSmiElementsKind(elements_kind)) {
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(eax, &transition_elements_kind);
   }
 
@@ -3858,7 +3843,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   __ j(not_equal, &miss_force_generic);
 
   __ bind(&finish_store);
-  if (IsFastSmiElementsKind(elements_kind)) {
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     // ecx is a smi, use times_half_pointer_size instead of
     // times_pointer_size
     __ mov(FieldOperand(edi,
@@ -3866,7 +3851,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
                         times_half_pointer_size,
                         FixedArray::kHeaderSize), eax);
   } else {
-    ASSERT(IsFastObjectElementsKind(elements_kind));
+    ASSERT(elements_kind == FAST_ELEMENTS);
     // Do the store and update the write barrier.
     // ecx is a smi, use times_half_pointer_size instead of
     // times_pointer_size

85  deps/v8/src/ic.cc

@@ -352,9 +352,9 @@ void IC::Clear(Address address) {
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
-    case Code::COMPARE_IC: return CompareIC::Clear(address, target);
     case Code::UNARY_OP_IC:
     case Code::BINARY_OP_IC:
+    case Code::COMPARE_IC:
     case Code::TO_BOOLEAN_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
@@ -365,8 +365,9 @@ void IC::Clear(Address address) {
 
 
 void CallICBase::Clear(Address address, Code* target) {
-  if (target->ic_state() == UNINITIALIZED) return;
   bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
+  State state = target->ic_state();
+  if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
@@ -409,17 +410,6 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
 }
 
 
-void CompareIC::Clear(Address address, Code* target) {
-  // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
-  if (target->major_key() != CodeStub::CompareIC) return;
-  // Only clear CompareICs that can retain objects.
-  if (target->compare_state() != KNOWN_OBJECTS) return;
-  Token::Value op = CompareIC::ComputeOperation(target);
-  SetTargetAtAddress(address, GetRawUninitialized(op));
-  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
-}
-
-
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -1644,7 +1634,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
     return string_stub();
   } else {
     ASSERT(receiver_map->has_dictionary_elements() ||
-           receiver_map->has_fast_smi_or_object_elements() ||
+           receiver_map->has_fast_elements() ||
+           receiver_map->has_fast_smi_only_elements() ||
           receiver_map->has_fast_double_elements() ||
           receiver_map->has_external_array_elements());
    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1659,7 +1650,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
                                              StubKind stub_kind,
                                              StrictModeFlag strict_mode,
                                              Handle<Code> generic_stub) {
-  if (receiver->HasFastSmiOrObjectElements() ||
+  if (receiver->HasFastElements() ||
+      receiver->HasFastSmiOnlyElements() ||
       receiver->HasExternalArrayElements() ||
       receiver->HasFastDoubleElements() ||
       receiver->HasDictionaryElements()) {
@@ -1679,26 +1671,15 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
     case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
     case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
       return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
+      break;
     case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
     case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
       return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
-    case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
-    case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
-    case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
-    case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
-      return JSObject::GetElementsTransitionMap(receiver,
-                                                FAST_HOLEY_ELEMENTS);
-    case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
-    case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
-      return JSObject::GetElementsTransitionMap(receiver,
-                                                FAST_HOLEY_DOUBLE_ELEMENTS);
-    case KeyedIC::LOAD:
-    case KeyedIC::STORE_NO_TRANSITION:
-    case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
-      UNREACHABLE();
       break;
-  }
+    default:
+      UNREACHABLE();
       return Handle<Map>::null();
+  }
 }
@@ -1758,56 +1739,32 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
   if (allow_growth) {
     // Handle growing array in stub if necessary.
-    if (receiver->HasFastSmiElements()) {
+    if (receiver->HasFastSmiOnlyElements()) {
       if (value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
-        } else {
           return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
-        }
       }
       if (value->IsHeapObject()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
-        } else {
           return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
-        }
       }
     } else if (receiver->HasFastDoubleElements()) {
       if (!value->IsSmi() && !value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
-        } else {
           return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
-        }
       }
     }
     return STORE_AND_GROW_NO_TRANSITION;
   } else {
     // Handle only in-bounds elements accesses.
-    if (receiver->HasFastSmiElements()) {
+    if (receiver->HasFastSmiOnlyElements()) {
      if (value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
-        } else {
           return STORE_TRANSITION_SMI_TO_DOUBLE;
-        }
       } else if (value->IsHeapObject()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
-        } else {
           return STORE_TRANSITION_SMI_TO_OBJECT;
-        }
       }
     } else if (receiver->HasFastDoubleElements()) {
       if (!value->IsSmi() && !value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
-        } else {
           return STORE_TRANSITION_DOUBLE_TO_OBJECT;
-        }
       }
     }
     return STORE_NO_TRANSITION;
   }
 }
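
With the holey kinds gone, GetStubKind reduces to one small decision table applied in both the grow and in-bounds branches. A condensed sketch with boolean parameters standing in for the receiver and value predicates (hypothetical names, illustrative only):

    enum TransitionSketch {
      kNoTransition, kSmiToDouble, kSmiToObject, kDoubleToObject
    };

    TransitionSketch Classify(bool receiver_has_smi_only_elements,
                              bool receiver_has_double_elements,
                              bool value_is_smi,
                              bool value_is_heap_number) {
      if (receiver_has_smi_only_elements) {
        if (value_is_heap_number) return kSmiToDouble;
        if (!value_is_smi) return kSmiToObject;  // any other heap object
      } else if (receiver_has_double_elements) {
        if (!value_is_smi && !value_is_heap_number) return kDoubleToObject;
      }
      return kNoTransition;
    }

The real code returns the STORE_* or STORE_AND_GROW_* enumerator directly depending on which branch (allow_growth or in-bounds) it is in.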
@@ -2439,7 +2396,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 
     // Activate inlined smi code.
     if (previous_type == BinaryOpIC::UNINITIALIZED) {
-      PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
+      PatchInlinedSmiCode(ic.address());
     }
   }
 
@@ -2500,14 +2457,6 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 }
 
 
-Code* CompareIC::GetRawUninitialized(Token::Value op) {
-  ICCompareStub stub(op, UNINITIALIZED);
-  Code* code = NULL;
-  CHECK(stub.FindCodeInCache(&code));
-  return code;
-}
-
-
 Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
   ICCompareStub stub(op, UNINITIALIZED);
   return stub.GetCode();
@@ -2522,12 +2471,6 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
 }
 
 
-Token::Value CompareIC::ComputeOperation(Code* target) {
-  ASSERT(target->major_key() == CodeStub::CompareIC);
-  return static_cast<Token::Value>(target->compare_operation());
-}
-
-
 const char* CompareIC::GetStateName(State state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";

20  deps/v8/src/ic.h

@@ -378,16 +378,10 @@ class KeyedIC: public IC {
     STORE_TRANSITION_SMI_TO_OBJECT,
     STORE_TRANSITION_SMI_TO_DOUBLE,
     STORE_TRANSITION_DOUBLE_TO_OBJECT,
-    STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
-    STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
-    STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
     STORE_AND_GROW_NO_TRANSITION,
     STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
     STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
-    STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
-    STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
-    STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
-    STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
+    STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
   };
 
   static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
@@ -800,9 +794,6 @@ class CompareIC: public IC {
   // Helper function for determining the state of a compare IC.
   static State ComputeState(Code* target);
 
-  // Helper function for determining the operation a compare IC is for.
-  static Token::Value ComputeOperation(Code* target);
-
   static const char* GetStateName(State state);
 
  private:
@@ -813,13 +804,7 @@ class CompareIC: public IC {
   Condition GetCondition() const { return ComputeCondition(op_); }
   State GetState() { return ComputeState(target()); }
 
-  static Code* GetRawUninitialized(Token::Value op);
-
-  static void Clear(Address address, Code* target);
-
   Token::Value op_;
-
-  friend class IC;
 };
 
@@ -832,8 +817,7 @@ class ToBooleanIC: public IC {
 
 // Helper for BinaryOpIC and CompareIC.
-enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+void PatchInlinedSmiCode(Address address);
 
 } }  // namespace v8::internal

26  deps/v8/src/incremental-marking-inl.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
     int64_t old_bytes_rescanned = bytes_rescanned_;
     bytes_rescanned_ = old_bytes_rescanned + obj_size;
     if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-      if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+      if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
         // If we have queued twice the heap size for rescanning then we are
         // going around in circles, scanning the same objects again and again
         // as the program mutates the heap faster than we can incrementally
@@ -118,29 +118,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
+  WhiteToGrey(obj, mark_bit);
   marking_deque_.PushGrey(obj);
 }

-bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-  if (!mark_bit.Get()) {
-    WhiteToGreyAndPush(obj, mark_bit);
-    return true;
-  }
-  return false;
-}
-
-bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-  if (!mark_bit.Get()) {
-    mark_bit.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    return true;
-  }
-  return false;
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
 }
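The change above is a small refactor: the colour transition is pulled out of WhiteToGreyAndPush into its own WhiteToGrey helper so callers can flip the mark without re-queuing the object. A toy model of the split, with stand-in MarkBit and deque types rather than V8's real ones:

    #include <deque>

    struct Object {};

    struct MarkBit {
      bool* cell;
      bool Get() const { return *cell; }
      void Set() { *cell = true; }  // white -> grey in this toy model
    };

    struct IncrementalMarker {
      std::deque<Object*> marking_deque;

      // Colour flip only; useful when the object is already queued.
      void WhiteToGrey(Object* obj, MarkBit mark_bit) { mark_bit.Set(); }

      // Colour flip plus queuing, expressed via the smaller helper.
      void WhiteToGreyAndPush(Object* obj, MarkBit mark_bit) {
        WhiteToGrey(obj, mark_bit);
        marking_deque.push_back(obj);
      }
    };

    int main() {
      IncrementalMarker m;
      Object o;
      bool bit = false;
      m.WhiteToGreyAndPush(&o, MarkBit{&bit});
      return (bit && m.marking_deque.size() == 1) ? 0 : 1;
    }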

43
deps/v8/src/incremental-marking.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,7 +42,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       state_(STOPPED),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(false),
-      marker_(this, heap->mark_compact_collector()),
       steps_count_(0),
       steps_took_(0),
       longest_step_(0.0),
@@ -664,22 +663,6 @@ void IncrementalMarking::Hurry() {
       } else if (map == global_context_map) {
         // Global contexts have weak fields.
         VisitGlobalContext(Context::cast(obj), &marking_visitor);
-      } else if (map->instance_type() == MAP_TYPE) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-
-        // When map collection is enabled we have to mark through map's
-        // transitions and back pointers in a special way to make these links
-        // weak.  Only maps for subclasses of JSReceiver can have transitions.
-        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-        if (FLAG_collect_maps &&
-            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-          marker_.MarkMapContents(map);
-        } else {
-          marking_visitor.VisitPointers(
-              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-        }
       } else {
         obj->Iterate(&marking_visitor);
       }
@@ -824,6 +807,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
       Map* map = obj->map();
       if (map == filler_map) continue;

+      if (obj->IsMap()) {
+        Map* map = Map::cast(obj);
+        heap_->ClearCacheOnMap(map);
+      }
+
       int size = obj->SizeFromMap(map);
       bytes_to_process -= size;
       MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@@ -841,22 +830,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
         MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
         VisitGlobalContext(ctx, &marking_visitor);
-      } else if (map->instance_type() == MAP_TYPE) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-
-        // When map collection is enabled we have to mark through map's
-        // transitions and back pointers in a special way to make these links
-        // weak.  Only maps for subclasses of JSReceiver can have transitions.
-        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-        if (FLAG_collect_maps &&
-            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-          marker_.MarkMapContents(map);
-        } else {
-          marking_visitor.VisitPointers(
-              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-        }
       } else if (map->instance_type() == JS_FUNCTION_TYPE) {
         marking_visitor.VisitPointers(
             HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
@@ -978,7 +951,7 @@ void IncrementalMarking::ResetStepCounters() {
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
 }

 } }  // namespace v8::internal
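One behavioural consequence of the two Step hunks above: map code caches are now cleared for every map object as it comes off the deque, rather than inside a MAP_TYPE dispatch arm. The surrounding loop is a classic byte-budgeted drain; a toy sketch of its shape (hypothetical types, omitting the per-type pointer visiting):

    #include <cstdint>
    #include <deque>

    struct HeapObject {
      int size;
      bool is_map;
    };

    // Drain queued objects until this increment's byte budget is spent.
    void Step(std::deque<HeapObject*>* deque, intptr_t bytes_to_process) {
      while (!deque->empty() && bytes_to_process > 0) {
        HeapObject* obj = deque->front();
        deque->pop_front();
        if (obj->is_map) {
          // The rolled-back code clears the map's code cache right here.
        }
        bytes_to_process -= obj->size;
        // ...visit the object's pointers, greying white children...
      }
    }

    int main() {
      HeapObject a = {64, false}, b = {32, true};
      std::deque<HeapObject*> deque;
      deque.push_back(&a);
      deque.push_back(&b);
      Step(&deque, 100);
      return deque.empty() ? 0 : 1;
    }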

15
deps/v8/src/incremental-marking.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -154,6 +154,8 @@ class IncrementalMarking {
   inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

+  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
   // Does white->black or keeps gray or black color. Returns true if converting
   // white to black.
   inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@@ -167,16 +169,6 @@ class IncrementalMarking {
     return true;
   }

-  // Marks the object grey and pushes it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for incremental marking only.
-  INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
-  // Marks the object black without pushing it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for incremental marking only.
-  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
-
   inline int steps_count() {
     return steps_count_;
   }
@@ -268,7 +260,6 @@ class IncrementalMarking {
   VirtualMemory* marking_deque_memory_;
   bool marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
-  Marker<IncrementalMarking> marker_;

   int steps_count_;
   double steps_took_;

2
deps/v8/src/isolate.h

@@ -965,7 +965,7 @@ class Isolate {
   // SerializerDeserializer state.
   static const int kPartialSnapshotCacheCapacity = 1400;

-  static const int kJSRegexpStaticOffsetsVectorSize = 128;
+  static const int kJSRegexpStaticOffsetsVectorSize = 50;

   Address external_callback() {
     return thread_local_top_.external_callback_;
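The 128 to 50 change is the storage side of the global-regexp batching this commit unwinds. A match needs two slots (start and end) for the full match and for each capture, so the static vector's size caps how many matches one call into generated code can bank. A small sketch of the arithmetic, mirroring the GlobalOffsetsVectorSize helper that reappears as removed code in jsregexp.cc below:

    #include <algorithm>
    #include <cassert>

    // Slots per match: start/end offsets for the match and every capture.
    int RegistersPerMatch(int capture_count) { return (capture_count + 1) * 2; }

    // Matches that fit in an offsets vector of at least static_size slots.
    int MaxMatchesPerCall(int capture_count, int static_size) {
      int per_match = RegistersPerMatch(capture_count);
      int size = std::max(per_match, static_size);
      return size / per_match;
    }

    int main() {
      // The newer 128-slot vector let a one-capture global regexp bank 32
      // matches per native call; the rolled-back engine stores one match per
      // call and loops in JavaScript instead.
      assert(MaxMatchesPerCall(1, 128) == 32);
      return 0;
    }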

88
deps/v8/src/jsregexp.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -324,7 +324,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
                              index)));
     if (index == -1) return isolate->factory()->null_value();
   }
-  ASSERT(last_match_info->HasFastObjectElements());
+  ASSERT(last_match_info->HasFastElements());

   {
     NoHandleAllocation no_handles;
@@ -429,7 +429,6 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
   RegExpEngine::CompilationResult result =
       RegExpEngine::Compile(&compile_data,
                             flags.is_ignore_case(),
-                            flags.is_global(),
                             flags.is_multiline(),
                             pattern,
                             sample_subject,
@@ -516,23 +515,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
 }

-int RegExpImpl::GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
-                                        int registers_per_match,
-                                        int* max_matches) {
-#ifdef V8_INTERPRETED_REGEXP
-  // Global loop in interpreted regexp is not implemented.  Therefore we choose
-  // the size of the offsets vector so that it can only store one match.
-  *max_matches = 1;
-  return registers_per_match;
-#else  // V8_INTERPRETED_REGEXP
-  int size = Max(registers_per_match, OffsetsVector::kStaticOffsetsVectorSize);
-  *max_matches = size / registers_per_match;
-  return size;
-#endif  // V8_INTERPRETED_REGEXP
-}
-
-int RegExpImpl::IrregexpExecRaw(
+RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
     Handle<JSRegExp> regexp,
     Handle<String> subject,
     int index,
@@ -634,7 +617,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
   OffsetsVector registers(required_registers, isolate);

-  int res = RegExpImpl::IrregexpExecRaw(
+  IrregexpResult res = RegExpImpl::IrregexpExecOnce(
       jsregexp, subject, previous_index, Vector<int>(registers.vector(),
                                                      registers.length()));
   if (res == RE_SUCCESS) {
@@ -2191,12 +2174,15 @@ int ActionNode::EatsAtLeast(int still_to_find,
 void ActionNode::FillInBMInfo(int offset,
+                              int recursion_depth,
+                              int budget,
                               BoyerMooreLookahead* bm,
                               bool not_at_start) {
   if (type_ == BEGIN_SUBMATCH) {
     bm->SetRest(offset);
   } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
-    on_success()->FillInBMInfo(offset, bm, not_at_start);
+    on_success()->FillInBMInfo(
+        offset, recursion_depth + 1, budget - 1, bm, not_at_start);
   }
   SaveBMInfo(bm, not_at_start, offset);
 }
@@ -2218,11 +2204,15 @@ int AssertionNode::EatsAtLeast(int still_to_find,
 }

-void AssertionNode::FillInBMInfo(
-    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void AssertionNode::FillInBMInfo(int offset,
+                                 int recursion_depth,
+                                 int budget,
+                                 BoyerMooreLookahead* bm,
+                                 bool not_at_start) {
   // Match the behaviour of EatsAtLeast on this node.
   if (type() == AT_START && not_at_start) return;
-  on_success()->FillInBMInfo(offset, bm, not_at_start);
+  on_success()->FillInBMInfo(
+      offset, recursion_depth + 1, budget - 1, bm, not_at_start);
   SaveBMInfo(bm, not_at_start, offset);
 }
@@ -2803,14 +2793,20 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
 }

-void LoopChoiceNode::FillInBMInfo(
-    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
-  if (body_can_be_zero_length_) {
+void LoopChoiceNode::FillInBMInfo(int offset,
+                                  int recursion_depth,
+                                  int budget,
+                                  BoyerMooreLookahead* bm,
+                                  bool not_at_start) {
+  if (body_can_be_zero_length_ ||
+      recursion_depth > RegExpCompiler::kMaxRecursion ||
+      budget <= 0) {
     bm->SetRest(offset);
     SaveBMInfo(bm, not_at_start, offset);
     return;
   }
-  ChoiceNode::FillInBMInfo(offset, bm, not_at_start);
+  ChoiceNode::FillInBMInfo(
+      offset, recursion_depth + 1, budget - 1, bm, not_at_start);
   SaveBMInfo(bm, not_at_start, offset);
 }
@@ -2912,7 +2908,7 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
     if (eats_at_least >= 1) {
       BoyerMooreLookahead* bm =
           new BoyerMooreLookahead(eats_at_least, compiler);
-      FillInBMInfo(0, bm, not_at_start);
+      FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
       if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
       if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
     }
@@ -3850,7 +3846,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
       BoyerMooreLookahead* bm =
           new BoyerMooreLookahead(eats_at_least, compiler);
       GuardedAlternative alt0 = alternatives_->at(0);
-      alt0.node()->FillInBMInfo(0, bm, not_at_start);
+      alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
       skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
     }
   } else {
@@ -5589,8 +5585,11 @@ void Analysis::VisitAssertion(AssertionNode* that) {
 }

-void BackReferenceNode::FillInBMInfo(
-    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void BackReferenceNode::FillInBMInfo(int offset,
+                                     int recursion_depth,
+                                     int budget,
+                                     BoyerMooreLookahead* bm,
+                                     bool not_at_start) {
   // Working out the set of characters that a backreference can match is too
   // hard, so we just say that any character can match.
   bm->SetRest(offset);
@@ -5602,9 +5601,13 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
               RegExpMacroAssembler::kTableSize);

-void ChoiceNode::FillInBMInfo(
-    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void ChoiceNode::FillInBMInfo(int offset,
+                              int recursion_depth,
+                              int budget,
+                              BoyerMooreLookahead* bm,
+                              bool not_at_start) {
   ZoneList<GuardedAlternative>* alts = alternatives();
+  budget = (budget - 1) / alts->length();
   for (int i = 0; i < alts->length(); i++) {
     GuardedAlternative& alt = alts->at(i);
     if (alt.guards() != NULL && alt.guards()->length() != 0) {
@@ -5612,14 +5615,18 @@ void ChoiceNode::FillInBMInfo(
       SaveBMInfo(bm, not_at_start, offset);
       return;
     }
-    alt.node()->FillInBMInfo(offset, bm, not_at_start);
+    alt.node()->FillInBMInfo(
+        offset, recursion_depth + 1, budget, bm, not_at_start);
   }
   SaveBMInfo(bm, not_at_start, offset);
 }

-void TextNode::FillInBMInfo(
-    int initial_offset, BoyerMooreLookahead* bm, bool not_at_start) {
+void TextNode::FillInBMInfo(int initial_offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start) {
   if (initial_offset >= bm->length()) return;
   int offset = initial_offset;
   int max_char = bm->max_char();
@@ -5673,6 +5680,8 @@ void TextNode::FillInBMInfo(
     return;
   }
   on_success()->FillInBMInfo(offset,
+                             recursion_depth + 1,
+                             budget - 1,
                              bm,
                              true);  // Not at start after a text node.
   if (initial_offset == 0) set_bm_info(not_at_start, bm);
@@ -5797,7 +5806,6 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
 RegExpEngine::CompilationResult RegExpEngine::Compile(
     RegExpCompileData* data,
     bool ignore_case,
-    bool is_global,
     bool is_multiline,
     Handle<String> pattern,
     Handle<String> sample_subject,
@@ -5901,8 +5909,6 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
     macro_assembler.SetCurrentPositionFromEnd(max_length);
   }

-  macro_assembler.set_global(is_global);
-
   return compiler.Assemble(&macro_assembler,
                            node,
                            data->capture_count,
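The recursion_depth/budget pair restored throughout this file is a plain work limiter for the Boyer-Moore precomputation: each hop to a successor spends budget, and a node that runs out records the conservative SetRest answer instead of walking a potentially exponential node graph. A self-contained sketch of the guard, using a toy node chain rather than the real RegExpNode hierarchy:

    static const int kMaxRecursion = 100;    // toy stand-in for the compiler limit
    static const int kFillInBMBudget = 200;  // matches RegExpNode::kFillInBMBudget

    struct Node {
      Node* successor;  // 0 at the end of the chain
      bool zero_length_loop;

      void FillInBMInfo(int offset, int recursion_depth, int budget) {
        // Out of depth or budget: record the safe, imprecise answer
        // (bm->SetRest(offset) in the real code) and stop recursing.
        if (zero_length_loop || recursion_depth > kMaxRecursion ||
            budget <= 0) {
          return;
        }
        if (successor != 0) {
          successor->FillInBMInfo(offset, recursion_depth + 1, budget - 1);
        }
      }
    };

    int main() {
      Node tail = {0, false};
      Node head = {&tail, false};
      head.FillInBMInfo(0, 0, kFillInBMBudget);
      return 0;
    }

ChoiceNode additionally divides its remaining budget across the alternatives (budget = (budget - 1) / alts->length()), which keeps the total work bounded even for heavily branching patterns.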

100
deps/v8/src/jsregexp.h

@@ -109,19 +109,13 @@ class RegExpImpl {
   static int IrregexpPrepare(Handle<JSRegExp> regexp,
                              Handle<String> subject);

-  // Calculate the size of offsets vector for the case of global regexp
-  // and the number of matches this vector is able to store.
-  static int GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
-                                     int registers_per_match,
-                                     int* max_matches);
-
-  // Execute a regular expression on the subject, starting from index.
-  // If matching succeeds, return the number of matches.  This can be larger
-  // than one in the case of global regular expressions.
-  // The captures and subcaptures are stored into the registers vector.
+  // Execute a regular expression once on the subject, starting from
+  // character "index".
+  // If successful, returns RE_SUCCESS and set the capture positions
+  // in the first registers.
   // If matching fails, returns RE_FAILURE.
   // If execution fails, sets a pending exception and returns RE_EXCEPTION.
-  static int IrregexpExecRaw(Handle<JSRegExp> regexp,
+  static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
                              Handle<String> subject,
                              int index,
                              Vector<int> registers);
@@ -580,9 +574,14 @@ class RegExpNode: public ZoneObject {
   // Collects information on the possible code units (mod 128) that can match if
   // we look forward.  This is used for a Boyer-Moore-like string searching
   // implementation.  TODO(erikcorry):  This should share more code with
-  // EatsAtLeast, GetQuickCheckDetails.
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+  // EatsAtLeast, GetQuickCheckDetails.  The budget argument is used to limit
+  // the number of nodes we are willing to look at in order to create this data.
+  static const int kFillInBMBudget = 200;
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start) {
     UNREACHABLE();
   }
@@ -681,9 +680,13 @@ class SeqRegExpNode: public RegExpNode {
   RegExpNode* on_success() { return on_success_; }
   void set_on_success(RegExpNode* node) { on_success_ = node; }
   virtual RegExpNode* FilterASCII(int depth);
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start) {
-    on_success_->FillInBMInfo(offset, bm, not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start) {
+    on_success_->FillInBMInfo(
+        offset, recursion_depth + 1, budget - 1, bm, not_at_start);
     if (offset == 0) set_bm_info(not_at_start, bm);
   }
@@ -736,8 +739,11 @@ class ActionNode: public SeqRegExpNode {
     return on_success()->GetQuickCheckDetails(
         details, compiler, filled_in, not_at_start);
   }
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start);
   Type type() { return type_; }
   // TODO(erikcorry): We should allow some action nodes in greedy loops.
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@@ -805,8 +811,11 @@ class TextNode: public SeqRegExpNode {
   virtual int GreedyLoopTextLength();
   virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
       RegExpCompiler* compiler);
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start);
   void CalculateOffsets();
   virtual RegExpNode* FilterASCII(int depth);
@@ -865,8 +874,11 @@ class AssertionNode: public SeqRegExpNode {
                                     RegExpCompiler* compiler,
                                     int filled_in,
                                     bool not_at_start);
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start);
   AssertionNodeType type() { return type_; }
   void set_type(AssertionNodeType type) { type_ = type; }
@@ -903,8 +915,11 @@ class BackReferenceNode: public SeqRegExpNode {
                                     bool not_at_start) {
     return;
   }
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start);

  private:
   int start_reg_;
@@ -928,8 +943,11 @@ class EndNode: public RegExpNode {
     // Returning 0 from EatsAtLeast should ensure we never get here.
     UNREACHABLE();
   }
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start) {
     // Returning 0 from EatsAtLeast should ensure we never get here.
     UNREACHABLE();
   }
@@ -1018,8 +1036,11 @@ class ChoiceNode: public RegExpNode {
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
                                     bool not_at_start);
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start);

   bool being_calculated() { return being_calculated_; }
   bool not_at_start() { return not_at_start_; }
@@ -1068,9 +1089,13 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
                                     bool not_at_start);
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start) {
-    alternatives_->at(1).node()->FillInBMInfo(offset, bm, not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start) {
+    alternatives_->at(1).node()->FillInBMInfo(
+        offset, recursion_depth + 1, budget - 1, bm, not_at_start);
     if (offset == 0) set_bm_info(not_at_start, bm);
   }
   // For a negative lookahead we don't emit the quick check for the
@@ -1100,8 +1125,11 @@ class LoopChoiceNode: public ChoiceNode {
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
                                     bool not_at_start);
-  virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start);
+  virtual void FillInBMInfo(int offset,
+                            int recursion_depth,
+                            int budget,
+                            BoyerMooreLookahead* bm,
+                            bool not_at_start);
   RegExpNode* loop_node() { return loop_node_; }
   RegExpNode* continue_node() { return continue_node_; }
   bool body_can_be_zero_length() { return body_can_be_zero_length_; }
@@ -1551,7 +1579,6 @@ class RegExpEngine: public AllStatic {
   static CompilationResult Compile(RegExpCompileData* input,
                                    bool ignore_case,
-                                   bool global,
                                    bool multiline,
                                    Handle<String> pattern,
                                    Handle<String> sample_subject,
@@ -1580,8 +1607,7 @@ class OffsetsVector {
   inline int* vector() { return vector_; }
   inline int length() { return offsets_vector_length_; }

-  static const int kStaticOffsetsVectorSize =
-      Isolate::kJSRegexpStaticOffsetsVectorSize;
+  static const int kStaticOffsetsVectorSize = 50;

  private:
   static Address static_offsets_vector_address(Isolate* isolate) {
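kStaticOffsetsVectorSize pairs with a small-buffer pattern: OffsetsVector appears to hand out the isolate's statically allocated vector when a match needs no more than that many slots, and to heap-allocate only for larger patterns. That division of labour is an inference from the fields visible here, so the following is only a toy illustration of the idea, not V8's class:

    class SmallOffsetsVector {
     public:
      static const int kStaticSize = 50;  // 128 before this rollback

      explicit SmallOffsetsVector(int length) : length_(length) {
        vector_ = (length_ > kStaticSize) ? new int[length_] : static_vector_;
      }
      ~SmallOffsetsVector() {
        if (vector_ != static_vector_) delete[] vector_;
      }
      int* vector() { return vector_; }
      int length() const { return length_; }

     private:
      int* vector_;
      int length_;
      static int static_vector_[kStaticSize];
    };

    int SmallOffsetsVector::static_vector_[SmallOffsetsVector::kStaticSize];

    int main() {
      SmallOffsetsVector small(10);   // served from the static buffer
      SmallOffsetsVector large(400);  // heap-allocated
      return small.vector() != large.vector() ? 0 : 1;
    }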

8
deps/v8/src/list-inl.h

@@ -136,14 +136,6 @@ bool List<T, P>::RemoveElement(const T& elm) {
 }

-template<typename T, class P>
-void List<T, P>::Allocate(int length) {
-  DeleteData(data_);
-  Initialize(length);
-  length_ = length;
-}
-
 template<typename T, class P>
 void List<T, P>::Clear() {
   DeleteData(data_);

3
deps/v8/src/list.h

@@ -117,9 +117,6 @@ class List {
   // pointer type. Returns the removed element.
   INLINE(T RemoveLast()) { return Remove(length_ - 1); }

-  // Deletes current list contents and allocates space for 'length' elements.
-  INLINE(void Allocate(int length));
-
   // Clears the list by setting the length to zero. Even if T is a
   // pointer type, clearing the list doesn't delete the entries.
   INLINE(void Clear());
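For reference, the Allocate the rollback deletes differed from Clear in one respect: both discard the old backing store, but Allocate leaves the list reporting 'length' default-constructed elements ready to be assigned by index, which the newer global-regexp result collection relied on. A toy equivalent without V8's allocation-policy parameter:

    #include <cassert>

    template <typename T>
    class List {
     public:
      List() : data_(0), length_(0) {}
      ~List() { delete[] data_; }

      // Drops contents; presents 'length' default-initialized slots.
      void Allocate(int length) {
        delete[] data_;
        data_ = new T[length]();
        length_ = length;
      }

      // Drops contents; length becomes zero.
      void Clear() {
        delete[] data_;
        data_ = 0;
        length_ = 0;
      }

      int length() const { return length_; }

     private:
      T* data_;
      int length_;
    };

    int main() {
      List<int> list;
      list.Allocate(8);
      assert(list.length() == 8);
      list.Clear();
      assert(list.length() == 0);
      return 0;
    }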

7
deps/v8/src/lithium.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -225,12 +225,9 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
       return 2;
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
       return 3;
-    case FAST_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       return kPointerSizeLog2;
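ElementsKindToShiftSize exists so element addressing can be a single shift: the byte offset of element i is i << shift, with 3 for 8-byte doubles and log2(pointer size) for tagged values. A compressed sketch over a reduced, hypothetical enum:

    #include <cassert>

    enum ElementsKind { FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    int ElementsKindToShiftSize(ElementsKind kind) {
      switch (kind) {
        case FAST_DOUBLE_ELEMENTS:
          return 3;  // 8-byte doubles
        case FAST_ELEMENTS:
          return sizeof(void*) == 8 ? 3 : 2;  // kPointerSizeLog2
      }
      return 0;
    }

    int main() {
      // Element 5 of a double array lives 40 bytes past the payload start.
      assert((5 << ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS)) == 40);
      return 0;
    }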

60
deps/v8/src/liveedit.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,6 @@
 #include "liveedit.h"

-#include "code-stubs.h"
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
@@ -1476,36 +1475,26 @@ static const char* DropFrames(Vector<StackFrame*> frames,
   // Check the nature of the top frame.
   Isolate* isolate = Isolate::Current();
   Code* pre_top_frame_code = pre_top_frame->LookupCode();
-  bool frame_has_padding;
   if (pre_top_frame_code->is_inline_cache_stub() &&
       pre_top_frame_code->ic_state() == DEBUG_BREAK) {
     // OK, we can drop inline cache calls.
     *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
-    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code ==
              isolate->debug()->debug_break_slot()) {
     // OK, we can drop debug break slot.
     *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
-    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code ==
              isolate->builtins()->builtin(
                  Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
-    frame_has_padding = false;
   } else if (pre_top_frame_code ==
              isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
     *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
-    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
-             pre_top_frame_code->major_key() == CodeStub::CEntry) {
-    // Entry from our unit tests on 'debugger' statement.
-    // It's fine, we support this case.
+             pre_top_frame_code->major_key()) {
+    // Entry from our unit tests, it's fine, we support this case.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
-    // We don't have a padding from 'debugger' statement call.
-    // Here the stub is CEntry, it's not debug-only and can't be padded.
-    // If anyone would complain, a proxy padded stub could be added.
-    frame_has_padding = false;
   } else {
     return "Unknown structure of stack above changing function";
   }
@@ -1515,50 +1504,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
       - Debug::kFrameDropperFrameSize * kPointerSize  // Size of the new frame.
       + kPointerSize;  // Bigger address end is exclusive.

-  Address* top_frame_pc_address = top_frame->pc_address();
-
-  // top_frame may be damaged below this point. Do not used it.
-  ASSERT(!(top_frame = NULL));
-
   if (unused_stack_top > unused_stack_bottom) {
-    if (frame_has_padding) {
-      int shortage_bytes =
-          static_cast<int>(unused_stack_top - unused_stack_bottom);
-
-      Address padding_start = pre_top_frame->fp() -
-          Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
-
-      Address padding_pointer = padding_start;
-      Smi* padding_object =
-          Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
-      while (Memory::Object_at(padding_pointer) == padding_object) {
-        padding_pointer -= kPointerSize;
-      }
-      int padding_counter =
-          Smi::cast(Memory::Object_at(padding_pointer))->value();
-      if (padding_counter * kPointerSize < shortage_bytes) {
-        return "Not enough space for frame dropper frame "
-            "(even with padding frame)";
-      }
-      Memory::Object_at(padding_pointer) =
-          Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
-
-      StackFrame* pre_pre_frame = frames[top_frame_index - 2];
-
-      memmove(padding_start + kPointerSize - shortage_bytes,
-              padding_start + kPointerSize,
-              Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
-
-      pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
-      pre_pre_frame->SetCallerFp(pre_top_frame->fp());
-
-      unused_stack_top -= shortage_bytes;
-
-      STATIC_ASSERT(sizeof(Address) == kPointerSize);
-      top_frame_pc_address -= shortage_bytes / kPointerSize;
-    } else {
-      return "Not enough space for frame dropper frame";
-    }
+    return "Not enough space for frame dropper frame";
   }

   // Committing now. After this point we should return only NULL value.
@@ -1567,7 +1515,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
   ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));

   Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
-  *top_frame_pc_address = code->entry();
+  top_frame->set_pc(code->entry());
   pre_top_frame->SetCallerFp(bottom_js_frame->fp());

   *restarter_frame_function_pointer =
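The deleted padding machinery is worth a gloss: the newer debug code pre-reserved stack slots filled with a known padding Smi plus a counter, and DropFrames scanned down until the marker stopped repeating to learn how much slack it could claim for a larger dropper frame. A toy model of that scan over an int array (hypothetical values; the real code walks raw stack memory via Memory::Object_at):

    #include <cassert>

    const int kPaddingMarker = -1;  // stand-in for the padding Smi

    // Walk down from 'start' while slots hold the marker; the slot just
    // below the run stores how many padding slots were reserved.
    int AvailablePaddingSlots(const int* slots, int start) {
      int i = start;
      while (slots[i] == kPaddingMarker) i--;
      return slots[i];
    }

    int main() {
      //              counter  padding...............................
      int stack[] = {7, 3, kPaddingMarker, kPaddingMarker, kPaddingMarker};
      assert(AvailablePaddingSlots(stack, 4) == 3);
      return 0;
    }

After the rollback there is no padding to consume, so the function simply fails with "Not enough space for frame dropper frame" whenever the new frame does not fit.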

28
deps/v8/src/mark-compact-inl.h

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,15 +52,6 @@ void MarkCompactCollector::SetFlags(int flags) {
 }

-bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
-  if (MarkObjectWithoutPush(obj)) {
-    marking_deque_.PushBlack(obj);
-    return true;
-  }
-  return false;
-}
-
 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
   ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
   if (!mark_bit.Get()) {
@@ -71,13 +62,16 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
 }

-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-  if (!mark_bit.Get()) {
-    SetMark(obj, mark_bit);
-    return true;
-  }
-  return false;
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
+  MarkBit mark = Marking::MarkBitFrom(object);
+  bool old_mark = mark.Get();
+  if (!old_mark) SetMark(object, mark);
+  return old_mark;
+}
+
+
+void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
+  if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
 }
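Careful with MarkObjectWithoutPush in this hunk: the two versions invert the boolean. The deleted variant answered "did I just mark it?", while the restored one returns the previous mark, i.e. "was it already marked?", which is exactly why the CALLBACKS case in mark-compact.cc flips from if (MarkObjectWithoutPush(value)) to if (!MarkObjectWithoutPush(value)). A side-by-side toy sketch of the two conventions:

    #include <cassert>

    struct Obj { bool marked; };

    // Deleted convention: true means the object was newly marked.
    bool MarkReturnsNewlyMarked(Obj* o) {
      if (!o->marked) { o->marked = true; return true; }
      return false;
    }

    // Restored convention: returns the old mark, so true means
    // "already marked, nothing to do".
    bool MarkReturnsOldMark(Obj* o) {
      bool old_mark = o->marked;
      if (!old_mark) o->marked = true;
      return old_mark;
    }

    int main() {
      Obj a = {false}, b = {false};
      assert(MarkReturnsNewlyMarked(&a));  // freshly marked -> true
      assert(!MarkReturnsOldMark(&b));     // freshly marked -> false
      return 0;
    }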

224
deps/v8/src/mark-compact.cc

@@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() :  // NOLINT
       abort_incremental_marking_(false),
       compacting_(false),
       was_marked_incrementally_(false),
+      collect_maps_(FLAG_collect_maps),
       flush_monomorphic_ics_(false),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
       heap_(NULL),
       code_flusher_(NULL),
-      encountered_weak_maps_(NULL),
-      marker_(this, this) { }
+      encountered_weak_maps_(NULL) { }

 #ifdef DEBUG
@@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() {
   MarkLiveObjects();
   ASSERT(heap_->incremental_marking()->IsStopped());

-  if (FLAG_collect_maps) ClearNonLiveTransitions();
+  if (collect_maps_) ClearNonLiveTransitions();

   ClearWeakMaps();
@@ -294,7 +294,7 @@ void MarkCompactCollector::CollectGarbage() {
   SweepSpaces();

-  if (!FLAG_collect_maps) ReattachInitialMaps();
+  if (!collect_maps_) ReattachInitialMaps();

   Finish();
@@ -658,6 +658,11 @@ void MarkCompactCollector::AbortCompaction() {
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

+  // Disable collection of maps if incremental marking is enabled.
+  // Map collection algorithm relies on a special map transition tree traversal
+  // order which is not implemented for incremental marking.
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
   // Monomorphic ICs are preserved when possible, but need to be flushed
   // when they might be keeping a Context alive, or when the heap is about
   // to be serialized.
@@ -675,6 +680,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);

+  if (collect_maps_) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -1180,7 +1186,16 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     Heap* heap = map->GetHeap();
     Code* code = reinterpret_cast<Code*>(object);
     if (FLAG_cleanup_code_caches_at_gc) {
-      code->ClearTypeFeedbackCells(heap);
+      Object* raw_info = code->type_feedback_info();
+      if (raw_info->IsTypeFeedbackInfo()) {
+        TypeFeedbackCells* type_feedback_cells =
+            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
+        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
+          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
+          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
+          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+        }
+      }
     }
     code->CodeIterateBody<StaticMarkingVisitor>(heap);
   }
@@ -1793,11 +1808,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
   heap_->ClearCacheOnMap(map);

   // When map collection is enabled we have to mark through map's transitions
-  // in a special way to make transition links weak. Only maps for subclasses
-  // of JSReceiver can have transitions.
+  // in a special way to make transition links weak.
+  // Only maps for subclasses of JSReceiver can have transitions.
   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-  if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-    marker_.MarkMapContents(map);
+  if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+    MarkMapContents(map);
   } else {
     marking_deque_.PushBlack(map);
   }
@@ -1807,86 +1822,79 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
 }

-// Force instantiation of template instances.
-template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
-template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
-
-template <class T>
-void Marker<T>::MarkMapContents(Map* map) {
+void MarkCompactCollector::MarkMapContents(Map* map) {
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
   // transitions in ClearNonLiveTransitions.
-  Object** proto_trans_slot =
-      HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
-  HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
-  if (prototype_transitions->IsFixedArray()) {
-    mark_compact_collector()->RecordSlot(proto_trans_slot,
-                                         proto_trans_slot,
-                                         prototype_transitions);
-    MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
-    if (!mark.Get()) {
-      mark.Set();
-      MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
-                                            prototype_transitions->Size());
-    }
+  FixedArray* prototype_transitions = map->prototype_transitions();
+  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+  if (!mark.Get()) {
+    mark.Set();
+    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+                                          prototype_transitions->Size());
   }

-  // Make sure that the back pointer stored either in the map itself or inside
-  // its prototype transitions array is marked. Treat pointers in the descriptor
-  // array as weak and also mark that array to prevent visiting it later.
-  base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
-
-  Object** descriptor_array_slot =
+  Object** raw_descriptor_array_slot =
       HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
-  Object* descriptor_array = *descriptor_array_slot;
-  if (!descriptor_array->IsSmi()) {
-    MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
+  Object* raw_descriptor_array = *raw_descriptor_array_slot;
+  if (!raw_descriptor_array->IsSmi()) {
+    MarkDescriptorArray(
+        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
   }

-  // Mark the Object* fields of the Map. Since the descriptor array has been
-  // marked already, it is fine that one of these fields contains a pointer
-  // to it. But make sure to skip back pointer and prototype transitions.
-  STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
-                Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
-  Object** start_slot = HeapObject::RawField(
-      map, Map::kPointerFieldsBeginOffset);
-  Object** end_slot = HeapObject::RawField(
-      map, Map::kPrototypeTransitionsOrBackPointerOffset);
-  for (Object** slot = start_slot; slot < end_slot; slot++) {
-    Object* obj = *slot;
-    if (!obj->NonFailureIsHeapObject()) continue;
-    mark_compact_collector()->RecordSlot(start_slot, slot, obj);
-    base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
-  }
+  // Mark the Object* fields of the Map.
+  // Since the descriptor array has been marked already, it is fine
+  // that one of these fields contains a pointer to it.
+  Object** start_slot = HeapObject::RawField(map,
+                                             Map::kPointerFieldsBeginOffset);
+  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
+  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
+}
+
+
+void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
+                                                int offset) {
+  Object** slot = HeapObject::RawField(accessors, offset);
+  HeapObject* accessor = HeapObject::cast(*slot);
+  if (accessor->IsMap()) return;
+  RecordSlot(slot, slot, accessor);
+  MarkObjectAndPush(accessor);
 }

-template <class T>
-void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
+void MarkCompactCollector::MarkDescriptorArray(
+    DescriptorArray* descriptors) {
+  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+  if (descriptors_mark.Get()) return;
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
+  ASSERT(descriptors != heap()->empty_descriptor_array());
+  SetMark(descriptors, descriptors_mark);

-  // The DescriptorArray contains a pointer to its contents array, but the
-  // contents array will be marked black and hence not be visited again.
-  if (!base_marker()->MarkObjectAndPush(descriptors)) return;
-  FixedArray* contents = FixedArray::cast(
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
       descriptors->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->IsHeapObject());
+  ASSERT(!IsMarked(contents));
+  ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
-  base_marker()->MarkObjectWithoutPush(contents);
-
-  // Contents contains (value, details) pairs. If the descriptor contains a
-  // transition (value is a Map), we don't mark the value as live. It might
-  // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
+  MarkBit contents_mark = Marking::MarkBitFrom(contents);
+  SetMark(contents, contents_mark);
+  // Contents contains (value, details) pairs.  If the details say that the type
+  // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
+  // live.  Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
+  // CONSTANT_TRANSITION is the value an Object* (a Map*).
   for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) at index i, i+1 is not
+    // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));

     Object** slot = contents->data_start() + i;
     if (!(*slot)->IsHeapObject()) continue;
     HeapObject* value = HeapObject::cast(*slot);

-    mark_compact_collector()->RecordSlot(slot, slot, *slot);
+    RecordSlot(slot, slot, *slot);

     switch (details.type()) {
       case NORMAL:
@@ -1894,22 +1902,21 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
       case CONSTANT_FUNCTION:
       case HANDLER:
       case INTERCEPTOR:
-        base_marker()->MarkObjectAndPush(value);
+        MarkObjectAndPush(value);
         break;
       case CALLBACKS:
         if (!value->IsAccessorPair()) {
-          base_marker()->MarkObjectAndPush(value);
-        } else if (base_marker()->MarkObjectWithoutPush(value)) {
-          AccessorPair* accessors = AccessorPair::cast(value);
-          MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
-          MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
+          MarkObjectAndPush(value);
+        } else if (!MarkObjectWithoutPush(value)) {
+          MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
+          MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
         }
         break;
       case ELEMENTS_TRANSITION:
         // For maps with multiple elements transitions, the transition maps are
         // stored in a FixedArray. Keep the fixed array alive but not the maps
         // that it refers to.
-        if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
+        if (value->IsFixedArray()) MarkObjectWithoutPush(value);
         break;
       case MAP_TRANSITION:
       case CONSTANT_TRANSITION:
@@ -1917,16 +1924,26 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
         break;
     }
   }
+  // The DescriptorArray descriptors contains a pointer to its contents array,
+  // but the contents array is already marked.
+  marking_deque_.PushBlack(descriptors);
 }

-template <class T>
-void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
-  Object** slot = HeapObject::RawField(accessors, offset);
-  HeapObject* accessor = HeapObject::cast(*slot);
-  if (accessor->IsMap()) return;
-  mark_compact_collector()->RecordSlot(slot, slot, accessor);
-  base_marker()->MarkObjectAndPush(accessor);
+void MarkCompactCollector::CreateBackPointers() {
+  HeapObjectIterator iterator(heap()->map_space());
+  for (HeapObject* next_object = iterator.Next();
+       next_object != NULL; next_object = iterator.Next()) {
+    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
+      Map* map = Map::cast(next_object);
+      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+      if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+        map->CreateBackPointers();
+      } else {
+        ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
+      }
+    }
+  }
 }
@@ -2453,8 +2470,15 @@ void MarkCompactCollector::ReattachInitialMaps() {
 void MarkCompactCollector::ClearNonLiveTransitions() {
   HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
-  // a marked map to an unmarked map to null transitions. This action
-  // is carried out only on maps of JSObjects and related subtypes.
+  // a marked map to an unmarked map to null transitions.  At the same time,
+  // set all the prototype fields of maps back to their original value,
+  // dropping the back pointers temporarily stored in the prototype field.
+  // Setting the prototype field requires following the linked list of
+  // back pointers, reversing them all at once.  This allows us to find
+  // those maps with map transitions that need to be nulled, and only
+  // scan the descriptor arrays of those maps, not all maps.
+  // All of these actions are carried out only on maps of JSObjects
+  // and related subtypes.
   for (HeapObject* obj = map_iterator.Next();
        obj != NULL; obj = map_iterator.Next()) {
     Map* map = reinterpret_cast<Map*>(obj);
@@ -2530,16 +2554,36 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
                                                       MarkBit map_mark) {
-  Object* potential_parent = map->GetBackPointer();
-  if (!potential_parent->IsMap()) return;
-  Map* parent = Map::cast(potential_parent);
+  // Follow the chain of back pointers to find the prototype.
+  Object* real_prototype = map;
+  while (real_prototype->IsMap()) {
+    real_prototype = Map::cast(real_prototype)->prototype();
+    ASSERT(real_prototype->IsHeapObject());
+  }

-  // Follow back pointer, check whether we are dealing with a map transition
-  // from a live map to a dead path and in case clear transitions of parent.
+  // Follow back pointers, setting them to prototype, clearing map transitions
+  // when necessary.
+  Map* current = map;
   bool current_is_alive = map_mark.Get();
-  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
-  if (!current_is_alive && parent_is_alive) {
-    parent->ClearNonLiveTransitions(heap());
+  bool on_dead_path = !current_is_alive;
+  while (current->IsMap()) {
+    Object* next = current->prototype();
+    // There should never be a dead map above a live map.
+    ASSERT(on_dead_path || current_is_alive);
+
+    // A live map above a dead map indicates a dead transition. This test will
+    // always be false on the first iteration.
+    if (on_dead_path && current_is_alive) {
+      on_dead_path = false;
+      current->ClearNonLiveTransitions(heap(), real_prototype);
+    }
+
+    Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+    *slot = real_prototype;
+    if (current_is_alive) RecordSlot(slot, slot, real_prototype);
+
+    current = reinterpret_cast<Map*>(next);
+    current_is_alive = Marking::MarkBitFrom(current).Get();
   }
 }
@@ -2738,9 +2782,7 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
     // We have to zap this pointer, because the store buffer may overflow later,
    // and then we have to scan the entire heap and we don't want to find
    // spurious newspace pointers in the old space.
-    // TODO(mstarzinger): This was changed to a sentinel value to track down
-    // rare crashes, change it back to Smi::FromInt(0) later.
-    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
+    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
   }
 }
@@ -3796,7 +3838,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;

-  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
+  intptr_t old_space_size = heap()->PromotedSpaceSize();
   intptr_t space_left =
       Min(heap()->OldGenPromotionLimit(old_space_size),
           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
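The restored ClearNonLiveMapTransitions depends on the back-pointer trick described at CreateBackPointers: during GC every map's prototype field temporarily holds its parent map, so following prototype() until the value stops being a map recovers the real prototype shared by the whole transition tree. A minimal sketch of that traversal with a toy object type:

    #include <cassert>

    struct Value {
      bool is_map;
      Value* prototype;  // back pointer while GC is running
    };

    // Follow back pointers until we fall off the map chain; that object is
    // the real prototype all maps in the transition tree share.
    Value* FindRealPrototype(Value* map) {
      Value* current = map;
      while (current->is_map) current = current->prototype;
      return current;
    }

    int main() {
      Value proto = {false, 0};
      Value root  = {true, &proto};
      Value child = {true, &root};  // child's back pointer leads to root
      assert(FindRealPrototype(&child) == &proto);
      return 0;
    }

This only works because all maps connected by transitions share one prototype object, which is what lets the field double as back-pointer storage during collection.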

64
deps/v8/src/mark-compact.h

@@ -42,7 +42,6 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 // Forward declarations.
 class CodeFlusher;
 class GCTracer;
-class MarkCompactCollector;
 class MarkingVisitor;
 class RootMarkingVisitor;
@@ -167,6 +166,7 @@ class Marking {
 // ----------------------------------------------------------------------------
 // Marking deque for tracing live objects.
+
 class MarkingDeque {
  public:
   MarkingDeque()
@@ -383,34 +383,6 @@ class SlotsBuffer {
 };

-// -------------------------------------------------------------------------
-// Marker shared between incremental and non-incremental marking
-template<class BaseMarker> class Marker {
- public:
-  Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
-      : base_marker_(base_marker),
-        mark_compact_collector_(mark_compact_collector) {}
-
-  // Mark pointers in a Map and its DescriptorArray together, possibly
-  // treating transitions or back pointers weak.
-  void MarkMapContents(Map* map);
-  void MarkDescriptorArray(DescriptorArray* descriptors);
-  void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
-
- private:
-  BaseMarker* base_marker() {
-    return base_marker_;
-  }
-
-  MarkCompactCollector* mark_compact_collector() {
-    return mark_compact_collector_;
-  }
-
-  BaseMarker* base_marker_;
-  MarkCompactCollector* mark_compact_collector_;
-};
-
 // Defined in isolate.h.
 class ThreadLocalTop;
@@ -612,6 +584,8 @@ class MarkCompactCollector {
   bool was_marked_incrementally_;

+  bool collect_maps_;
+
   bool flush_monomorphic_ics_;

   // A pointer to the current stack-allocated GC tracer object during a full
@@ -634,13 +608,12 @@ class MarkCompactCollector {
   //
   //   After: Live objects are marked and non-live objects are unmarked.

   friend class RootMarkingVisitor;
   friend class MarkingVisitor;
   friend class StaticMarkingVisitor;
   friend class CodeMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
-  friend class Marker<IncrementalMarking>;
-  friend class Marker<MarkCompactCollector>;

   // Mark non-optimize code for functions inlined into the given optimized
   // code. This will prevent it from being flushed.
@@ -658,25 +631,29 @@ class MarkCompactCollector {
   void AfterMarking();

   // Marks the object black and pushes it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for non-incremental marking only.
-  INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
-  // Marks the object black and pushes it on the marking stack.
-  // This is for non-incremental marking only.
+  // This is for non-incremental marking.
   INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

-  // Marks the object black without pushing it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for non-incremental marking only.
-  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
+  INLINE(bool MarkObjectWithoutPush(HeapObject* object));
+  INLINE(void MarkObjectAndPush(HeapObject* value));

-  // Marks the object black assuming that it is not yet marked.
-  // This is for non-incremental marking only.
+  // Marks the object black.  This is for non-incremental marking.
   INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

   void ProcessNewlyMarkedObject(HeapObject* obj);

+  // Creates back pointers for all map transitions, stores them in
+  // the prototype field.  The original prototype pointers are restored
+  // in ClearNonLiveTransitions().  All JSObject maps
+  // connected by map transitions have the same prototype object, which
+  // is why we can use this field temporarily for back pointers.
+  void CreateBackPointers();
+
+  // Mark a Map and its DescriptorArray together, skipping transitions.
+  void MarkMapContents(Map* map);
+  void MarkAccessorPairSlot(HeapObject* accessors, int offset);
+  void MarkDescriptorArray(DescriptorArray* descriptors);
+
   // Mark the heap roots and all objects reachable from them.
   void MarkRoots(RootMarkingVisitor* visitor);
@@ -779,7 +756,6 @@ class MarkCompactCollector {
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
-  Marker<MarkCompactCollector> marker_;

   List<Page*> evacuation_candidates_;
   List<Code*> invalidated_code_;

deps/v8/src/messages.js

@@ -61,21 +61,18 @@ function FormatString(format, message) {
 // To check if something is a native error we need to check the
-// concrete native error types. It is not sufficient to use instanceof
-// since it possible to create an object that has Error.prototype on
-// its prototype chain. This is the case for DOMException for example.
+// concrete native error types. It is not enough to check "obj
+// instanceof $Error" because user code can replace
+// NativeError.prototype.__proto__. User code cannot replace
+// NativeError.prototype though and therefore this is a safe test.
 function IsNativeErrorObject(obj) {
-  switch (%_ClassOf(obj)) {
-    case 'Error':
-    case 'EvalError':
-    case 'RangeError':
-    case 'ReferenceError':
-    case 'SyntaxError':
-    case 'TypeError':
-    case 'URIError':
-      return true;
-  }
-  return false;
+  return (obj instanceof $Error) ||
+      (obj instanceof $EvalError) ||
+      (obj instanceof $RangeError) ||
+      (obj instanceof $ReferenceError) ||
+      (obj instanceof $SyntaxError) ||
+      (obj instanceof $TypeError) ||
+      (obj instanceof $URIError);
 }
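The comments above carry the whole argument for each side. A minimal sketch in plain JavaScript (using the public Error rather than the internal $Error, which is an assumption of this illustration) shows why an instanceof test is weaker than classifying by the internal [[Class]]:

    // An object can acquire Error.prototype without being a native error:
    var fake = Object.create(Error.prototype);
    console.log(fake instanceof Error);                 // true  -- instanceof is fooled
    console.log(Object.prototype.toString.call(fake));  // "[object Object]" -- [[Class]] is not
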
@@ -748,7 +745,7 @@ function GetPositionInLine(message) {
 function GetStackTraceLine(recv, fun, pos, isGlobal) {
-  return new CallSite(recv, fun, pos).toString();
+  return FormatSourcePosition(new CallSite(recv, fun, pos));
 }
 // ----------------------------------------------------------------------------
@@ -788,7 +785,15 @@ function CallSiteGetThis() {
 }
 function CallSiteGetTypeName() {
-  return GetTypeName(this, false);
+  var constructor = this.receiver.constructor;
+  if (!constructor) {
+    return %_CallFunction(this.receiver, ObjectToString);
+  }
+  var constructorName = constructor.name;
+  if (!constructorName) {
+    return %_CallFunction(this.receiver, ObjectToString);
+  }
+  return constructorName;
 }
 function CallSiteIsToplevel() {
@@ -822,10 +827,8 @@ function CallSiteGetFunctionName() {
   var name = this.fun.name;
   if (name) {
     return name;
-  }
-  name = %FunctionGetInferredName(this.fun);
-  if (name) {
-    return name;
+  } else {
+    return %FunctionGetInferredName(this.fun);
   }
   // Maybe this is an evaluation?
   var script = %FunctionGetScript(this.fun);
@@ -916,69 +919,6 @@ function CallSiteIsConstructor() {
   return this.fun === constructor;
 }
-function CallSiteToString() {
-  var fileName;
-  var fileLocation = "";
-  if (this.isNative()) {
-    fileLocation = "native";
-  } else if (this.isEval()) {
-    fileName = this.getScriptNameOrSourceURL();
-    if (!fileName) {
-      fileLocation = this.getEvalOrigin();
-    }
-  } else {
-    fileName = this.getFileName();
-  }
-  if (fileName) {
-    fileLocation += fileName;
-    var lineNumber = this.getLineNumber();
-    if (lineNumber != null) {
-      fileLocation += ":" + lineNumber;
-      var columnNumber = this.getColumnNumber();
-      if (columnNumber) {
-        fileLocation += ":" + columnNumber;
-      }
-    }
-  }
-  if (!fileLocation) {
-    fileLocation = "unknown source";
-  }
-  var line = "";
-  var functionName = this.getFunctionName();
-  var addSuffix = true;
-  var isConstructor = this.isConstructor();
-  var isMethodCall = !(this.isToplevel() || isConstructor);
-  if (isMethodCall) {
-    var typeName = GetTypeName(this, true);
-    var methodName = this.getMethodName();
-    if (functionName) {
-      if (typeName && functionName.indexOf(typeName) != 0) {
-        line += typeName + ".";
-      }
-      line += functionName;
-      if (methodName && functionName.lastIndexOf("." + methodName) !=
-          functionName.length - methodName.length - 1) {
-        line += " [as " + methodName + "]";
-      }
-    } else {
-      line += typeName + "." + (methodName || "<anonymous>");
-    }
-  } else if (isConstructor) {
-    line += "new " + (functionName || "<anonymous>");
-  } else if (functionName) {
-    line += functionName;
-  } else {
-    line += fileLocation;
-    addSuffix = false;
-  }
-  if (addSuffix) {
-    line += " (" + fileLocation + ")";
-  }
-  return line;
-}
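For context on what the removed CallSiteToString exposed: CallSite objects are handed to Error.prepareStackTrace, and the accessors installed by SetUpLockedPrototype below exist on both sides of this rollback. A hedged sketch of user-level use (the exact line format is version-dependent, and after this rollback frame.toString() is no longer a locked CallSite method, so a formatter should use the accessors):

    Error.prepareStackTrace = function(error, frames) {
      // frames is an array of CallSite objects.
      return frames.map(function(frame) {
        return (frame.getFunctionName() || "<anonymous>") + " @ " +
               frame.getFileName() + ":" + frame.getLineNumber();
      }).join("\n");
    };
    function boom() { throw new Error("x"); }
    try { boom(); } catch (e) { console.log(e.stack); }
    Error.prepareStackTrace = undefined;  // restore default formatting
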
 SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getThis", CallSiteGetThis,
   "getTypeName", CallSiteGetTypeName,
@@ -994,8 +934,7 @@ SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getColumnNumber", CallSiteGetColumnNumber,
   "isNative", CallSiteIsNative,
   "getPosition", CallSiteGetPosition,
-  "isConstructor", CallSiteIsConstructor,
-  "toString", CallSiteToString
+  "isConstructor", CallSiteIsConstructor
 ));
@@ -1037,6 +976,65 @@ function FormatEvalOrigin(script) {
   return eval_origin;
 }
+function FormatSourcePosition(frame) {
+  var fileName;
+  var fileLocation = "";
+  if (frame.isNative()) {
+    fileLocation = "native";
+  } else if (frame.isEval()) {
+    fileName = frame.getScriptNameOrSourceURL();
+    if (!fileName) {
+      fileLocation = frame.getEvalOrigin();
+    }
+  } else {
+    fileName = frame.getFileName();
+  }
+  if (fileName) {
+    fileLocation += fileName;
+    var lineNumber = frame.getLineNumber();
+    if (lineNumber != null) {
+      fileLocation += ":" + lineNumber;
+      var columnNumber = frame.getColumnNumber();
+      if (columnNumber) {
+        fileLocation += ":" + columnNumber;
+      }
+    }
+  }
+  if (!fileLocation) {
+    fileLocation = "unknown source";
+  }
+  var line = "";
+  var functionName = frame.getFunction().name;
+  var addPrefix = true;
+  var isConstructor = frame.isConstructor();
+  var isMethodCall = !(frame.isToplevel() || isConstructor);
+  if (isMethodCall) {
+    var methodName = frame.getMethodName();
+    line += frame.getTypeName() + ".";
+    if (functionName) {
+      line += functionName;
+      if (methodName && (methodName != functionName)) {
+        line += " [as " + methodName + "]";
+      }
+    } else {
+      line += methodName || "<anonymous>";
+    }
+  } else if (isConstructor) {
+    line += "new " + (functionName || "<anonymous>");
+  } else if (functionName) {
+    line += functionName;
+  } else {
+    line += fileLocation;
+    addPrefix = false;
+  }
+  if (addPrefix) {
+    line += " (" + fileLocation + ")";
+  }
+  return line;
+}
 function FormatStackTrace(error, frames) {
   var lines = [];
   try {
@@ -1052,7 +1050,7 @@ function FormatStackTrace(error, frames) {
     var frame = frames[i];
     var line;
     try {
-      line = frame.toString();
+      line = FormatSourcePosition(frame);
     } catch (e) {
       try {
         line = "<error: " + e + ">";
@@ -1083,19 +1081,6 @@ function FormatRawStackTrace(error, raw_stack) {
   }
 }
-function GetTypeName(obj, requireConstructor) {
-  var constructor = obj.receiver.constructor;
-  if (!constructor) {
-    return requireConstructor ? null :
-        %_CallFunction(obj.receiver, ObjectToString);
-  }
-  var constructorName = constructor.name;
-  if (!constructorName) {
-    return requireConstructor ? null :
-        %_CallFunction(obj.receiver, ObjectToString);
-  }
-  return constructorName;
-}
 function captureStackTrace(obj, cons_opt) {
   var stackTraceLimit = $Error.stackTraceLimit;
@@ -1140,7 +1125,13 @@ function SetUpError() {
   }
   %FunctionSetInstanceClassName(f, 'Error');
   %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
-  %SetProperty(f.prototype, "name", name, DONT_ENUM);
+  // The name property on the prototype of error objects is not
+  // specified as being read-one and dont-delete. However, allowing
+  // overwriting allows leaks of error objects between script blocks
+  // in the same context in a browser setting. Therefore we fix the
+  // name.
+  %SetProperty(f.prototype, "name", name,
+               DONT_ENUM | DONT_DELETE | READ_ONLY) ;
   %SetCode(f, function(m) {
     if (%_IsConstructCall()) {
       // Define all the expected properties directly on the error
@@ -1156,8 +1147,10 @@ function SetUpError() {
       return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
     });
   } else if (!IS_UNDEFINED(m)) {
-    %IgnoreAttributesAndSetProperty(
-        this, 'message', ToString(m), DONT_ENUM);
+    %IgnoreAttributesAndSetProperty(this,
+                                    'message',
+                                    ToString(m),
+                                    DONT_ENUM);
   }
   captureStackTrace(this, f);
 } else {
@@ -1187,41 +1180,16 @@ $Error.captureStackTrace = captureStackTrace;
 var visited_errors = new InternalArray();
 var cyclic_error_marker = new $Object();
-function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
-  // Climb the prototype chain until we find the holder.
-  while (error && !%HasLocalProperty(error, name)) {
-    error = error.__proto__;
-  }
-  if (error === null) return void 0;
-  if (!IS_OBJECT(error)) return error[name];
-  // If the property is an accessor on one of the predefined errors that can be
-  // generated statically by the compiler, don't touch it. This is to address
-  // http://code.google.com/p/chromium/issues/detail?id=69187
-  var desc = %GetOwnProperty(error, name);
-  if (desc && desc[IS_ACCESSOR_INDEX]) {
-    var isName = name === "name";
-    if (error === $ReferenceError.prototype)
-      return isName ? "ReferenceError" : void 0;
-    if (error === $SyntaxError.prototype)
-      return isName ? "SyntaxError" : void 0;
-    if (error === $TypeError.prototype)
-      return isName ? "TypeError" : void 0;
-  }
-  // Otherwise, read normally.
-  return error[name];
-}
 function ErrorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
-    var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
-    var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
+    var type = error.type;
+    var name = error.name;
     name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
-    var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
+    var message = error.message;
     var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
     if (type && !hasMessage) {
-      var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
-      message = FormatMessage(%NewMessageObject(type, args));
+      message = FormatMessage(%NewMessageObject(type, error.arguments));
     }
     message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
     if (name === "") return message;
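The helper deleted above existed to keep user-installed accessors from running during error formatting. A minimal illustration of the hazard in plain JavaScript (the side effect is made up for the example):

    var err = new Error("original");
    Object.defineProperty(err, "message", {
      get: function() { throw "getter ran during formatting"; }
    });
    try {
      String(err);  // Error.prototype.toString reads err.message
    } catch (e) {
      console.log(e);  // with plain property reads, the monkey getter fires
    }
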

deps/v8/src/mips/builtins-mips.cc

@@ -118,7 +118,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
                                  Label* gc_required) {
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -214,8 +214,7 @@ static void AllocateJSArray(MacroAssembler* masm,
                             bool fill_with_hole,
                             Label* gc_required) {
   // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2,
-                         elements_array_storage, fill_with_hole);
+  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
   if (FLAG_debug_code) {  // Assert that array size is not zero.
     __ Assert(
@@ -450,10 +449,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ Branch(call_generic_code);
   __ bind(&not_double);
-  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
   // a3: JSArray
   __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          a2,
                                          t5,

deps/v8/src/mips/code-stubs-mips.cc

@@ -5043,7 +5043,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
                          1, a0, a2);
   // Isolates: note we add an additional parameter here (isolate pointer).
-  const int kRegExpExecuteArguments = 9;
+  const int kRegExpExecuteArguments = 8;
   const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
@@ -5054,33 +5054,27 @@
   // allocating space for the c argument slots, we don't need to calculate
   // that into the argument positions on the stack. This is how the stack will
   // look (sp meaning the value of sp at this moment):
-  // [sp + 5] - Argument 9
   // [sp + 4] - Argument 8
   // [sp + 3] - Argument 7
   // [sp + 2] - Argument 6
   // [sp + 1] - Argument 5
   // [sp + 0] - saved ra
-  // Argument 9: Pass current isolate address.
+  // Argument 8: Pass current isolate address.
   // CFunctionArgumentOperand handles MIPS stack argument slots.
   __ li(a0, Operand(ExternalReference::isolate_address()));
-  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
+  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
-  // Argument 8: Indicate that this is a direct call from JavaScript.
+  // Argument 7: Indicate that this is a direct call from JavaScript.
   __ li(a0, Operand(1));
-  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
+  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
-  // Argument 7: Start (high end) of backtracking stack memory area.
+  // Argument 6: Start (high end) of backtracking stack memory area.
   __ li(a0, Operand(address_of_regexp_stack_memory_address));
   __ lw(a0, MemOperand(a0, 0));
   __ li(a2, Operand(address_of_regexp_stack_memory_size));
   __ lw(a2, MemOperand(a2, 0));
   __ addu(a0, a0, a2);
-  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
-  // Argument 6: Set the number of capture registers to zero to force global
-  // regexps to behave as non-global. This does not affect non-global regexps.
-  __ mov(a0, zero_reg);
   __ sw(a0, MemOperand(sp, 2 * kPointerSize));
   // Argument 5: static offsets vector buffer.
@@ -5131,9 +5125,7 @@
   // Check the result.
   Label success;
-  __ Branch(&success, eq, v0, Operand(1));
-  // We expect exactly one result since we force the called regexp to behave
-  // as non-global.
+  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
   Label failure;
   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
   // If not exception it can only be retry. Handle that in the runtime system.
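The dropped argument 6 belonged to the newer engine's ability to run a global regexp in one native call; the rolled-back stub returns one match per invocation and relies on the classic lastIndex protocol. A sketch of that protocol in JavaScript:

    var re = /o/g;
    var m;
    while ((m = re.exec("foo")) !== null) {
      console.log(m.index, re.lastIndex);  // 1 2, then 2 3
    }
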
@@ -7370,8 +7362,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
   // KeyedStoreStubCompiler::GenerateStoreFastElement.
   { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
   { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
-  // and ElementsTransitionGenerator::GenerateSmiToDouble
+  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
+  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
   // and ElementsTransitionGenerator::GenerateDoubleToObject
   { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
   { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
@@ -7637,9 +7629,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
   Label fast_elements;
   __ CheckFastElements(a2, t1, &double_elements);
-  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
   __ JumpIfSmi(a0, &smi_element);
-  __ CheckFastSmiElements(a2, t1, &fast_elements);
+  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
   // Store into the array literal requires a elements transition. Call into
   // the runtime.
@@ -7651,7 +7643,7 @@
   __ Push(t1, t0);
   __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
   __ bind(&fast_elements);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7664,8 +7656,8 @@
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
-  // and value is Smi.
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
   __ bind(&smi_element);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -7674,7 +7666,7 @@
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
-  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
   __ bind(&double_elements);
   __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
   __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,

deps/v8/src/mips/codegen-mips.cc

@@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
 // -------------------------------------------------------------------------
 // Code generators
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0    : value
@@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
 }
-void ElementsTransitionGenerator::GenerateSmiToDouble(
+void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
     MacroAssembler* masm, Label* fail) {
   // ----------- S t a t e -------------
   //  -- a0    : value

deps/v8/src/mips/debug-mips.cc

@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,8 +116,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
                                    Assembler::kDebugBreakSlotInstructions);
 }
-const bool Debug::FramePaddingLayout::kIsSupported = false;
 #define __ ACCESS_MASM(masm)

deps/v8/src/mips/full-codegen-mips.cc

@@ -1711,8 +1711,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_fast_elements =
-      IsFastObjectElementsKind(constant_elements_kind);
+  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
@@ -1734,7 +1733,8 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
+           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
            FLAG_smi_only_arrays);
     FastCloneShallowArrayStub::Mode mode = has_fast_elements
         ? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1763,7 +1763,7 @@
     VisitForAccumulatorValue(subexpr);
-    if (IsFastObjectElementsKind(constant_elements_kind)) {
+    if (constant_elements_kind == FAST_ELEMENTS) {
       int offset = FixedArray::kHeaderSize + (i * kPointerSize);
       __ lw(t2, MemOperand(sp));  // Copy of array literal.
       __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));

deps/v8/src/mips/ic-mips.cc

@@ -1347,35 +1347,34 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
   __ Branch(&non_double_value, ne, t0, Operand(at));
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_DOUBLE_ELEMENTS,
                                          receiver_map,
                                          t0,
                                          &slow);
   ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
+  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&fast_double_without_map_check);
   __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                          FAST_ELEMENTS,
                                          receiver_map,
                                          t0,
                                          &slow);
   ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ jmp(&finish_object_store);
   __ bind(&transition_double_elements);
-  // Elements are double, but value is an Object that's not a HeapNumber. Make
-  // sure that the receiver is a Array with Object elements and transition array
-  // from double elements to Object elements.
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
+  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
   __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                          FAST_ELEMENTS,
                                          receiver_map,
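All of the renames in this hunk label the same lattice of element representations; only the names differ between the two V8 versions. A sketch of the transitions this IC emits code for, observable from JavaScript:

    var a = [1, 2, 3];  // smi-only elements
    a[0] = 1.5;         // smi -> double transition (non-smi number stored)
    a[1] = "x";         // double -> object transition (non-number stored)
    // Each widening store swaps the array's map and backing store, which is
    // what GenerateSmiOnlyToDouble / GenerateSmiOnlyToObject implement.
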
@@ -1472,7 +1471,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
   // Must return the modified receiver in v0.
   if (!FLAG_trace_elements_transitions) {
     Label fail;
-    ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
     __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a2);
     __ bind(&fail);
@@ -1689,12 +1688,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+    PatchInlinedSmiCode(address());
   }
 }
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Address address) {
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1728,30 +1727,33 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  // This is patching a conditional "jump if not smi/jump if smi" site.
-  // Enabling by changing from
-  //   andi at, rx, 0
-  //   Branch <target>, eq, at, Operand(zero_reg)
-  // to:
-  //   andi at, rx, #kSmiTagMask
-  //   Branch <target>, ne, at, Operand(zero_reg)
-  // and vice-versa to be disabled again.
-  CodePatcher patcher(patch_address, 2);
-  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-  if (check == ENABLE_INLINED_SMI_CHECK) {
-    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
-  } else {
-    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
-    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-    patcher.masm()->andi(at, reg, 0);
-  }
-  ASSERT(Assembler::IsBranch(branch_instr));
-  if (Assembler::IsBeq(branch_instr)) {
+  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  ASSERT(Assembler::IsBranch(branch_instr));
+  if (Assembler::IsBeq(branch_instr)) {
+    // This is patching a "jump if not smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
     patcher.ChangeBranchCondition(ne);
   } else {
     ASSERT(Assembler::IsBne(branch_instr));
+    // This is patching a "jump if smi" site to be active.
+    // Changing:
+    //   andi at, rx, 0
+    //   Branch <target>, ne, at, Operand(zero_reg)
+    // to:
+    //   andi at, rx, #kSmiTagMask
+    //   Branch <target>, eq, at, Operand(zero_reg)
+    CodePatcher patcher(patch_address, 2);
+    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
     patcher.ChangeBranchCondition(eq);
   }
 }

deps/v8/src/mips/lithium-codegen-mips.cc

@@ -2343,37 +2343,40 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
   int map_count = instr->hydrogen()->types()->length();
-  bool need_generic = instr->hydrogen()->need_generic();
-  if (map_count == 0 && !need_generic) {
-    DeoptimizeIf(al, instr->environment());
-    return;
-  }
   Handle<String> name = instr->hydrogen()->name();
-  Label done;
-  __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-  for (int i = 0; i < map_count; ++i) {
-    bool last = (i == map_count - 1);
-    Handle<Map> map = instr->hydrogen()->types()->at(i);
-    if (last && !need_generic) {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    } else {
-      Label next;
-      __ Branch(&next, ne, scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ Branch(&done);
-      __ bind(&next);
-    }
-  }
-  if (need_generic) {
-    __ li(a2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  }
-  __ bind(&done);
+  if (map_count == 0) {
+    ASSERT(instr->hydrogen()->need_generic());
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    Label done;
+    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    for (int i = 0; i < map_count - 1; ++i) {
+      Handle<Map> map = instr->hydrogen()->types()->at(i);
+      Label next;
+      __ Branch(&next, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&next);
+    }
+    Handle<Map> map = instr->hydrogen()->types()->last();
+    if (instr->hydrogen()->need_generic()) {
+      Label generic;
+      __ Branch(&generic, ne, scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+      __ Branch(&done);
+      __ bind(&generic);
+      __ li(a2, Operand(name));
+      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+      CallCode(ic, RelocInfo::CODE_TARGET, instr);
+    } else {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    }
+    __ bind(&done);
+  }
 }
@@ -2448,10 +2451,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
     __ Ext(scratch, scratch, Map::kElementsKindShift,
            Map::kElementsKindBitCount);
-    __ Branch(&fail, lt, scratch,
-              Operand(GetInitialFastElementsKind()));
-    __ Branch(&done, le, scratch,
-              Operand(TERMINAL_FAST_ELEMENTS_KIND));
+    __ Branch(&done, eq, scratch,
+              Operand(FAST_ELEMENTS));
     __ Branch(&fail, lt, scratch,
               Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
     __ Branch(&done, le, scratch,
@@ -2504,9 +2505,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   // Load the result.
   __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
   __ addu(scratch, elements, scratch);
-  uint32_t offset = FixedArray::kHeaderSize +
-                    (instr->additional_index() << kPointerSizeLog2);
-  __ lw(result, FieldMemOperand(scratch, offset));
+  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2537,21 +2536,17 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
   }
   if (key_is_constant) {
-    __ Addu(elements, elements,
-            Operand(((constant_key + instr->additional_index()) << shift_size) +
+    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
             FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   } else {
     __ sll(scratch, key, shift_size);
     __ Addu(elements, elements, Operand(scratch));
     __ Addu(elements, elements,
-            Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-                    (instr->additional_index() << shift_size)));
+            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
-  }
+  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
   __ ldc1(result, MemOperand(elements));
 }
@@ -2573,41 +2568,32 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
-  int additional_offset = instr->additional_index() << shift_size;
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     FPURegister result = ToDoubleRegister(instr->result());
     if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
     } else {
       __ sll(scratch0(), key, shift_size);
       __ Addu(scratch0(), scratch0(), external_pointer);
     }
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ lwc1(result, MemOperand(scratch0(), additional_offset));
+      __ lwc1(result, MemOperand(scratch0()));
       __ cvt_d_s(result, result);
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ ldc1(result, MemOperand(scratch0(), additional_offset));
+      __ ldc1(result, MemOperand(scratch0()));
     }
   } else {
     Register result = ToRegister(instr->result());
     Register scratch = scratch0();
-    if (instr->additional_index() != 0 && !key_is_constant) {
-      __ Addu(scratch, key, instr->additional_index());
-    }
     MemOperand mem_operand(zero_reg);
     if (key_is_constant) {
-      mem_operand =
-          MemOperand(external_pointer,
-                     (constant_key << shift_size) + additional_offset);
+      mem_operand = MemOperand(external_pointer,
+                               constant_key * (1 << shift_size));
     } else {
-      if (instr->additional_index() == 0) {
-        __ sll(scratch, key, shift_size);
-      } else {
-        __ sll(scratch, scratch, shift_size);
-      }
+      __ sll(scratch, key, shift_size);
       __ Addu(scratch, scratch, external_pointer);
       mem_operand = MemOperand(scratch);
     }
@@ -2640,10 +2626,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3525,17 +3508,11 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset =
-        (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
-        + FixedArray::kHeaderSize;
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
     __ sw(value, FieldMemOperand(elements, offset));
   } else {
     __ sll(scratch, key, kPointerSizeLog2);
     __ addu(scratch, elements, scratch);
-    if (instr->additional_index() != 0) {
-      __ Addu(scratch,
-              scratch,
-              instr->additional_index() << kPointerSizeLog2);
-    }
     __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   }
@@ -3578,7 +3555,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
   int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   if (key_is_constant) {
-    __ Addu(scratch, elements, Operand((constant_key << shift_size) +
+    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
             FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   } else {
     __ sll(scratch, key, shift_size);
@@ -3599,7 +3576,7 @@
   }
   __ bind(&not_nan);
-  __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
+  __ sdc1(value, MemOperand(scratch));
 }
@@ -3620,13 +3597,12 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
-  int additional_offset = instr->additional_index() << shift_size;
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     FPURegister value(ToDoubleRegister(instr->value()));
     if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key << shift_size);
+      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
     } else {
       __ sll(scratch0(), key, shift_size);
       __ Addu(scratch0(), scratch0(), external_pointer);
@@ -3634,27 +3610,19 @@
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ cvt_s_d(double_scratch0(), value);
-      __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+      __ swc1(double_scratch0(), MemOperand(scratch0()));
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ sdc1(value, MemOperand(scratch0(), additional_offset));
+      __ sdc1(value, MemOperand(scratch0()));
     }
   } else {
     Register value(ToRegister(instr->value()));
-    Register scratch = scratch0();
-    if (instr->additional_index() != 0 && !key_is_constant) {
-      __ Addu(scratch, key, instr->additional_index());
-    }
     MemOperand mem_operand(zero_reg);
+    Register scratch = scratch0();
    if (key_is_constant) {
      mem_operand = MemOperand(external_pointer,
-                               ((constant_key + instr->additional_index())
-                                << shift_size));
+                               constant_key * (1 << shift_size));
    } else {
-      if (instr->additional_index() == 0) {
-        __ sll(scratch, key, shift_size);
-      } else {
-        __ sll(scratch, scratch, shift_size);
-      }
+      __ sll(scratch, key, shift_size);
      __ Addu(scratch, scratch, external_pointer);
      mem_operand = MemOperand(scratch);
    }
@@ -3676,10 +3644,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -3717,21 +3682,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   __ Branch(&not_applicable, ne, scratch, Operand(from_map));
   __ li(new_map_reg, Operand(to_map));
-  if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
+  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
     __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, kRAHasBeenSaved, kDontSaveFPRegs);
-  } else if (IsFastSmiElementsKind(from_kind) &&
-             IsFastDoubleElementsKind(to_kind)) {
+  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+             to_kind == FAST_DOUBLE_ELEMENTS) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(a2));
     ASSERT(new_map_reg.is(a3));
     __ mov(fixed_object_reg, object_reg);
     CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
              RelocInfo::CODE_TARGET, instr);
-  } else if (IsFastDoubleElementsKind(from_kind) &&
-             IsFastObjectElementsKind(to_kind)) {
+  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
     Register fixed_object_reg = ToRegister(instr->temp_reg());
     ASSERT(fixed_object_reg.is(a2));
     ASSERT(new_map_reg.is(a3));
@@ -4486,9 +4450,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   // Deopt if the array literal boilerplate ElementsKind is of a type different
   // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-  if (CanTransitionToMoreGeneralFastElementsKind(
-          boilerplate_elements_kind, true)) {
+  // already been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
     __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
     // Load map into a2.
     __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -4641,11 +4604,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   ElementsKind boilerplate_elements_kind =
       instr->hydrogen()->boilerplate()->GetElementsKind();
-  // Deopt if the array literal boilerplate ElementsKind is of a type different
-  // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
-  if (CanTransitionToMoreGeneralFastElementsKind(
-          boilerplate_elements_kind, true)) {
+  // Deopt if the literal boilerplate ElementsKind is of a type different than
+  // the expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
     __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
     // Load map into a2.
     __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));

deps/v8/src/mips/lithium-mips.cc

@@ -2023,9 +2023,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 LInstruction* LChunkBuilder::DoTransitionElementsKind(
     HTransitionElementsKind* instr) {
-  ElementsKind from_kind = instr->original_map()->elements_kind();
-  ElementsKind to_kind = instr->transitioned_map()->elements_kind();
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
+      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
     LOperand* object = UseRegister(instr->object());
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =

deps/v8/src/mips/lithium-mips.h

@@ -1201,7 +1201,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
@@ -1218,7 +1217,6 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
@@ -1239,7 +1237,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
@@ -1708,7 +1705,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
@@ -1731,7 +1727,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
@@ -1776,7 +1771,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
-  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };

deps/v8/src/mips/macro-assembler-mips.cc

@@ -3341,39 +3341,33 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
 }
 void MacroAssembler::CheckFastObjectElements(Register map,
                                              Register scratch,
                                              Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   Branch(fail, ls, scratch,
-         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
   Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleyElementValue));
+         Operand(Map::kMaximumBitField2FastElementValue));
 }
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Register scratch,
-                                          Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Register scratch,
+                                              Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
 }
@@ -3475,17 +3469,22 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
   lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   Operand right = Operand(map);
   if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    ElementsKind kind = map->elements_kind();
-    if (IsFastElementsKind(kind)) {
-      bool packed = IsFastPackedElementsKind(kind);
-      Map* current_map = *map;
-      while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
-        kind = GetNextMoreGeneralFastElementsKind(kind, packed);
-        current_map = current_map->LookupElementsTransitionMap(kind, NULL);
-        if (!current_map) break;
-        Branch(early_success, eq, scratch, right);
-        right = Operand(Handle<Map>(current_map));
-      }
+    Map* transitioned_fast_element_map(
+        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
+    ASSERT(transitioned_fast_element_map == NULL ||
+           map->elements_kind() != FAST_ELEMENTS);
+    if (transitioned_fast_element_map != NULL) {
+      Branch(early_success, eq, scratch, right);
+      right = Operand(Handle<Map>(transitioned_fast_element_map));
+    }
+    Map* transitioned_double_map(
+        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
+    ASSERT(transitioned_double_map == NULL ||
+           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
+    if (transitioned_double_map != NULL) {
+      Branch(early_success, eq, scratch, right);
+      right = Operand(Handle<Map>(transitioned_double_map));
     }
   }
@@ -4444,37 +4443,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
   lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check that the function's map is the same as the expected cached map.
-  lw(scratch,
-     MemOperand(scratch,
-                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
-  size_t offset = expected_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  Branch(no_map_match, ne, map_in_out, Operand(scratch));
+  int expected_index =
+      Context::GetContextMapIndexFromElementsKind(expected_kind);
+  lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
+  Branch(no_map_match, ne, map_in_out, Operand(at));
 
   // Use the transitioned cached map.
-  offset = transitioned_kind * kPointerSize +
-      FixedArrayBase::kHeaderSize;
-  lw(map_in_out, FieldMemOperand(scratch, offset));
+  int trans_index =
+      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+  lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
 }
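The two sides cache the initial array maps differently: the pre-rollback code indexed one cached FixedArray of maps by elements kind, while the rolled-back code gives each kind its own slot in the global context. A sketch of the two offset computations, with illustrative header sizes rather than the real V8 values:

    #include <cstdio>

    constexpr int kPointerSize = 4;
    constexpr int kFixedArrayHeaderSize = 8;  // illustrative, not the real value
    constexpr int kContextHeaderSize = 8;     // illustrative SlotOffset analogue

    // Pre-rollback: elements kind indexes into a single FixedArray of maps.
    int ArrayOfMapsOffset(int kind) {
      return kind * kPointerSize + kFixedArrayHeaderSize;
    }

    // Post-rollback: each kind maps to its own slot index in the context.
    int ContextSlotOffset(int slot_index) {
      return slot_index * kPointerSize + kContextHeaderSize;
    }

    int main() {
      printf("kind 1 via fixed array: %d\n", ArrayOfMapsOffset(1));
      printf("kind 1 via context slot 5 (hypothetical index): %d\n",
             ContextSlotOffset(5));
      return 0;
    }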
 void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch,
-    Register map_out, bool can_have_holes) {
+    Register function_in, Register scratch, Register map_out) {
   ASSERT(!function_in.is(map_out));
   Label done;
   lw(map_out, FieldMemOperand(function_in,
                               JSFunction::kPrototypeOrInitialMapOffset));
   if (!FLAG_smi_only_arrays) {
-    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        kind,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  } else if (can_have_holes) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                        FAST_HOLEY_SMI_ELEMENTS,
+    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+                                        FAST_ELEMENTS,
                                         map_out,
                                         scratch,
                                         &done);
@@ -5389,7 +5378,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(NULL, address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap on order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.

5
deps/v8/src/mips/macro-assembler-mips.h

@@ -819,8 +819,7 @@ class MacroAssembler: public Assembler {
   // Load the initial map for new Arrays from a JSFunction.
   void LoadInitialArrayMap(Register function_in,
                            Register scratch,
-                           Register map_out,
-                           bool can_have_holes);
+                           Register map_out);
 
   void LoadGlobalFunction(int index, Register function);
@@ -962,7 +961,7 @@ class MacroAssembler: public Assembler {
   // Check if a map for a JSObject indicates that the object has fast smi only
   // elements. Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Register scratch,
-                            Label* fail);
+  void CheckFastSmiOnlyElements(Register map,
+                                Register scratch,
+                                Label* fail);

136
deps/v8/src/mips/regexp-macro-assembler-mips.cc

@@ -43,31 +43,27 @@ namespace internal {
 #ifndef V8_INTERPRETED_REGEXP
 /*
  * This assembler uses the following register assignment convention
- * - t7 : Temporarily stores the index of capture start after a matching pass
- *        for a global regexp.
  * - t1 : Pointer to current code object (Code*) including heap object tag.
  * - t2 : Current position in input, as negative offset from end of string.
  *        Please notice that this is the byte offset, not the character offset!
  * - t3 : Currently loaded character. Must be loaded using
  *        LoadCurrentCharacter before using any of the dispatch methods.
- * - t4 : Points to tip of backtrack stack
+ * - t4 : points to tip of backtrack stack
  * - t5 : Unused.
  * - t6 : End of input (points to byte after last character in input).
  * - fp : Frame pointer. Used to access arguments, local variables and
  *        RegExp registers.
- * - sp : Points to tip of C stack.
+ * - sp : points to tip of C stack.
  *
  * The remaining registers are free for computations.
  * Each call to a public method should retain this convention.
 *
 * The stack will have the following structure:
 *
- *  - fp[64]  Isolate* isolate   (address of the current isolate)
- *  - fp[60]  direct_call  (if 1, direct call from JavaScript code,
+ *  - fp[56]  direct_call  (if 1, direct call from JavaScript code,
  *                          if 0, call through the runtime system).
- *  - fp[56]  stack_area_base (High end of the memory area to use as
+ *  - fp[52]  stack_area_base (High end of the memory area to use as
  *                             backtracking stack).
- *  - fp[52]  capture array size (may fit multiple sets of matches)
  *  - fp[48]  int* capture_array (int[num_saved_registers_], for output).
  *  - fp[44]  secondary link/return address used by native call.
  *  --- sp when called ---
@@ -75,17 +71,16 @@ namespace internal {
  *  - fp[36]  old frame pointer (r11).
  *  - fp[0..32]  backup of registers s0..s7.
  *  --- frame pointer ----
- *  - fp[-4]  end of input       (address of end of string).
- *  - fp[-8]  start of input     (address of first character in string).
+ *  - fp[-4]  end of input       (Address of end of string).
+ *  - fp[-8]  start of input     (Address of first character in string).
  *  - fp[-12] start index        (character index of start).
  *  - fp[-16] void* input_string (location of a handle containing the string).
- *  - fp[-20] success counter    (only for global regexps to count matches).
- *  - fp[-24] Offset of location before start of input (effectively character
+ *  - fp[-20] Offset of location before start of input (effectively character
  *            position -1). Used to initialize capture registers to a
  *            non-position.
- *  - fp[-28] At start (if 1, we are starting at the start of the
+ *  - fp[-24] At start (if 1, we are starting at the start of the
  *            string, otherwise 0)
- *  - fp[-32] register 0         (Only positions must be stored in the first
+ *  - fp[-28] register 0         (Only positions must be stored in the first
  *  -         register 1          num_saved_registers_ registers)
  *  - ...
  *  -         register num_registers-1
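The local-variable half of the rolled-back frame layout can be cross-checked against the constants that reappear in regexp-macro-assembler-mips.h at the end of this diff. A minimal sketch assuming 32-bit MIPS (kPointerSize == 4):

    // Names mirror the header constants further below in this diff.
    constexpr int kPointerSize        = 4;
    constexpr int kInputEnd           = -4;
    constexpr int kInputStart         = kInputEnd - kPointerSize;            // -8
    constexpr int kStartIndex         = kInputStart - kPointerSize;          // -12
    constexpr int kInputString        = kStartIndex - kPointerSize;          // -16
    constexpr int kInputStartMinusOne = kInputString - kPointerSize;         // -20
    constexpr int kAtStart            = kInputStartMinusOne - kPointerSize;  // -24
    constexpr int kRegisterZero       = kAtStart - kPointerSize;             // -28

    static_assert(kAtStart == -24, "matches '- fp[-24] At start' above");
    static_assert(kRegisterZero == -28, "matches '- fp[-28] register 0' above");

    int main() { return 0; }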
@@ -206,8 +201,8 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
 void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
-  BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
 
   // If we did, are we still at the start of the input?
   __ lw(a1, MemOperand(frame_pointer(), kInputStart));
@@ -219,8 +214,8 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
 void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
-  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
+  __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+  BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
 
   // If we did, are we still at the start of the input?
   __ lw(a1, MemOperand(frame_pointer(), kInputStart));
   __ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
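Both hunks above replace a direct read of the start index with a read of the precomputed kAtStart flag, which also flips the branch condition from ne to eq. The two tests agree because GetCode stores (start_index == 0) into the flag; a small sketch:

    #include <cassert>
    #include <initializer_list>

    bool StartedAtBeginning_PreRollback(int start_index) {
      return start_index == 0;    // Branch ... ne, a0, zero_reg falls through
    }

    bool StartedAtBeginning_PostRollback(int at_start_flag) {
      return at_start_flag != 0;  // Branch ... eq, a0, zero_reg falls through
    }

    int main() {
      // The flag is defined as (start_index == 0), so the tests are equivalent.
      for (int idx : {0, 1, 5}) {
        int flag = (idx == 0) ? 1 : 0;
        assert(StartedAtBeginning_PreRollback(idx) ==
               StartedAtBeginning_PostRollback(flag));
      }
      return 0;
    }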
@@ -645,7 +640,6 @@ void RegExpMacroAssemblerMIPS::Fail() {
 Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
-  Label return_v0;
   if (masm_->has_exception()) {
     // If the code gets corrupted due to long regular expressions and lack of
     // space on trampolines, an internal exception flag is set. If this case
@@ -675,9 +669,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     // Set frame pointer in space for it if this is not a direct call
     // from generated code.
     __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
-    __ mov(a0, zero_reg);
-    __ push(a0);  // Make room for success counter and initialize it to 0.
     __ push(a0);  // Make room for "position - 1" constant (value irrelevant).
+    __ push(a0);  // Make room for "at start" constant (value irrelevant).
 
     // Check if we have space on the stack for registers.
     Label stack_limit_hit;
@@ -696,12 +689,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     // Exit with OutOfMemory exception. There is not enough space on the stack
     // for our working registers.
     __ li(v0, Operand(EXCEPTION));
-    __ jmp(&return_v0);
+    __ jmp(&exit_label_);
 
     __ bind(&stack_limit_hit);
     CallCheckStackGuardState(a0);
     // If returned value is non-zero, we exit with the returned value as result.
-    __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+    __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
 
     __ bind(&stack_ok);
 
     // Allocate space on stack for registers.
@@ -722,25 +715,16 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     // position registers.
     __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
 
-    // Initialize code pointer register
-    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
-
-    Label load_char_start_regexp, start_regexp;
-    // Load newline if index is at start, previous character otherwise.
-    __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
-    __ li(current_character(), Operand('\n'));
-    __ jmp(&start_regexp);
-    // Global regexp restarts matching here.
-    __ bind(&load_char_start_regexp);
-    // Load previous char as initial value of current character register.
-    LoadCurrentCharacterUnchecked(-1, 1);
-    __ bind(&start_regexp);
+    // Determine whether the start index is zero, that is at the start of the
+    // string, and store that value in a local variable.
+    __ mov(t5, a1);
+    __ li(a1, Operand(1));
+    __ Movn(a1, zero_reg, t5);
+    __ sw(a1, MemOperand(frame_pointer(), kAtStart));
 
-    // Initialize on-stack registers.
     if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
       // Fill saved registers with initial value = start offset - 1.
-      if (num_saved_registers_ > 8) {
       // Address of register 0.
       __ Addu(a1, frame_pointer(), Operand(kRegisterZero));
       __ li(a2, Operand(num_saved_registers_));
@@ -750,16 +734,20 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
       __ Addu(a1, a1, Operand(-kPointerSize));
       __ Subu(a2, a2, Operand(1));
       __ Branch(&init_loop, ne, a2, Operand(zero_reg));
-      } else {
-        for (int i = 0; i < num_saved_registers_; i++) {
-          __ sw(a0, register_location(i));
-        }
-      }
     }
 
     // Initialize backtrack stack pointer.
     __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+
+    // Initialize code pointer register
+    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+    // Load previous char as initial value of current character register.
+    Label at_start;
+    __ lw(a0, MemOperand(frame_pointer(), kAtStart));
+    __ Branch(&at_start, ne, a0, Operand(zero_reg));
+    LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
+    __ jmp(&start_label_);
+    __ bind(&at_start);
+    __ li(current_character(), Operand('\n'));
     __ jmp(&start_label_);
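The restored prologue computes the kAtStart flag with a conditional move: a1 is set to 1, then Movn clears it whenever the start index (saved in t5) is non-zero. The same logic in plain C++:

    #include <cassert>

    int ComputeAtStartFlag(int start_index) {
      int flag = 1;             // __ li(a1, Operand(1));
      if (start_index != 0) {   // __ Movn(a1, zero_reg, t5);
        flag = 0;
      }
      return flag;              // __ sw(a1, ... kAtStart)
    }

    int main() {
      assert(ComputeAtStartFlag(0) == 1);
      assert(ComputeAtStartFlag(7) == 0);
      return 0;
    }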
@@ -788,10 +776,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
       for (int i = 0; i < num_saved_registers_; i += 2) {
         __ lw(a2, register_location(i));
         __ lw(a3, register_location(i + 1));
-        if (global()) {
-          // Keep capture start in a4 for the zero-length check later.
-          __ mov(t7, a2);
-        }
         if (mode_ == UC16) {
           __ sra(a2, a2, 1);
           __ Addu(a2, a2, a1);
@@ -807,52 +791,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
           __ Addu(a0, a0, kPointerSize);
         }
       }
-
-      if (global()) {
-        // Restart matching if the regular expression is flagged as global.
-        __ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-        __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
-        __ lw(a2, MemOperand(frame_pointer(), kRegisterOutput));
-        // Increment success counter.
-        __ Addu(a0, a0, 1);
-        __ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-        // Capture results have been stored, so the number of remaining global
-        // output registers is reduced by the number of stored captures.
-        __ Subu(a1, a1, num_saved_registers_);
-        // Check whether we have enough room for another set of capture results.
-        __ mov(v0, a0);
-        __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
-        __ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
-        // Advance the location for output.
-        __ Addu(a2, a2, num_saved_registers_ * kPointerSize);
-        __ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
-
-        // Prepare a0 to initialize registers with its value in the next run.
-        __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
-
-        // Special case for zero-length matches.
-        // t7: capture start index
-        // Not a zero-length match, restart.
-        __ Branch(
-            &load_char_start_regexp, ne, current_input_offset(), Operand(t7));
-        // Offset from the end is zero if we already reached the end.
-        __ Branch(&exit_label_, eq, current_input_offset(), Operand(zero_reg));
-        // Advance current position after a zero-length match.
-        __ Addu(current_input_offset(),
-                current_input_offset(),
-                Operand((mode_ == UC16) ? 2 : 1));
-        __ Branch(&load_char_start_regexp);
-      } else {
-        __ li(v0, Operand(SUCCESS));
-      }
-    }
+      __ li(v0, Operand(SUCCESS));
     }
 
     // Exit and return v0.
     __ bind(&exit_label_);
-    if (global()) {
-      __ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
-    }
-
-    __ bind(&return_v0);
     // Skip sp past regexp registers and local variables..
     __ mov(sp, frame_pointer());
     // Restore registers s0..s7 and return (restoring ra to pc).
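The removed block implemented the global-regexp restart loop: bank the captures, shrink the remaining output space, and resume matching, with a special case that advances past a zero-length match so the loop terminates. A sketch of just that advance rule (mode_ == UC16 means two bytes per character):

    #include <cassert>

    // Hypothetical helper, not V8 code: where a global regexp should resume
    // scanning after a match at [match_start, match_end).
    int NextScanPosition(int match_start, int match_end, bool uc16) {
      if (match_end != match_start) return match_end;  // normal restart
      return match_end + (uc16 ? 2 : 1);               // step past empty match
    }

    int main() {
      assert(NextScanPosition(3, 7, false) == 7);  // non-empty match
      assert(NextScanPosition(5, 5, false) == 6);  // empty match, one-byte mode
      assert(NextScanPosition(5, 5, true) == 7);   // empty match, UC16
      return 0;
    }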
@@ -878,7 +820,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     __ MultiPop(regexp_registers_to_retain);
     // If returning non-zero, we should end execution with the given
     // result as return value.
-    __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+    __ Branch(&exit_label_, ne, v0, Operand(zero_reg));
 
     // String might have moved: Reload end of string from frame.
     __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -922,7 +864,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
     __ bind(&exit_with_exception);
     // Exit with Result EXCEPTION(-1) to signal thrown exception.
     __ li(v0, Operand(EXCEPTION));
-    __ jmp(&return_v0);
+    __ jmp(&exit_label_);
   }
 }
@@ -1070,9 +1012,8 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
 }
 
-bool RegExpMacroAssemblerMIPS::Succeed() {
+void RegExpMacroAssemblerMIPS::Succeed() {
   __ jmp(&success_label_);
-  return global();
 }
@@ -1339,9 +1280,8 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
                                                              int characters) {
   Register offset = current_input_offset();
   if (cp_offset != 0) {
-    // t7 is not being used to store the capture start index at this point.
-    __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
-    offset = t7;
+    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = a0;
   }
   // We assume that we cannot do unaligned loads on MIPS, so this function
   // must only be used to load a single character at a time.

11
deps/v8/src/mips/regexp-macro-assembler-mips.h

@@ -115,7 +115,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
   virtual void ReadStackPointerFromRegister(int reg);
   virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
-  virtual bool Succeed();
+  virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);
   virtual void WriteStackPointerToRegister(int reg);
@@ -141,8 +141,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
   static const int kStackFrameHeader = kReturnAddress + kPointerSize;
   // Stack parameters placed by caller.
   static const int kRegisterOutput = kStackFrameHeader + 20;
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   static const int kIsolate = kDirectCall + kPointerSize;
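Dropping kNumOutputRegisters shifts every caller-pushed slot above it down by one word, which is exactly the fp[56]/fp[52] renumbering in the layout comment earlier in this file. A quick arithmetic check, assuming kPointerSize == 4 and an illustrative base (the real base derives from kReturnAddress, defined earlier in the header):

    constexpr int kPointerSize = 4;
    constexpr int kStackFrameHeader = 28;  // illustrative stand-in only
    constexpr int kRegisterOutput = kStackFrameHeader + 20;          // 48
    constexpr int kStackHighEnd   = kRegisterOutput + kPointerSize;  // 52
    constexpr int kDirectCall     = kStackHighEnd + kPointerSize;    // 56
    constexpr int kIsolate        = kDirectCall + kPointerSize;      // 60

    static_assert(kRegisterOutput == 48 && kStackHighEnd == 52 &&
                  kDirectCall == 56 && kIsolate == 60,
                  "matches the fp[48]/fp[52]/fp[56] entries in the layout comment");

    int main() { return 0; }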
@@ -154,10 +153,10 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
   static const int kInputString = kStartIndex - kPointerSize;
   // When adding local variables remember to push space for them in
   // the frame in GetCode.
-  static const int kSuccessfulCaptures = kInputString - kPointerSize;
-  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  static const int kInputStartMinusOne = kInputString - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+  static const int kRegisterZero = kAtStart - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
