
Roll V8 back to 3.10.8.13

isaacs, v0.9.1-release, commit 46b09e4190
Changed files:

  1. deps/v8/ChangeLog (107)
  2. deps/v8/DEPS (27)
  3. deps/v8/Makefile (7)
  4. deps/v8/SConstruct (17)
  5. deps/v8/build/common.gypi (131)
  6. deps/v8/build/gyp_v8 (36)
  7. deps/v8/build/standalone.gypi (12)
  8. deps/v8/include/v8.h (8)
  9. deps/v8/src/SConscript (1)
  10. deps/v8/src/api.cc (37)
  11. deps/v8/src/api.h (4)
  12. deps/v8/src/arm/builtins-arm.cc (9)
  13. deps/v8/src/arm/code-stubs-arm.cc (35)
  14. deps/v8/src/arm/codegen-arm.cc (4)
  15. deps/v8/src/arm/debug-arm.cc (4)
  16. deps/v8/src/arm/full-codegen-arm.cc (7)
  17. deps/v8/src/arm/ic-arm.cc (53)
  18. deps/v8/src/arm/lithium-arm.cc (5)
  19. deps/v8/src/arm/lithium-arm.h (9)
  20. deps/v8/src/arm/lithium-codegen-arm.cc (134)
  21. deps/v8/src/arm/macro-assembler-arm.cc (80)
  22. deps/v8/src/arm/macro-assembler-arm.h (5)
  23. deps/v8/src/arm/regexp-macro-assembler-arm.cc (144)
  24. deps/v8/src/arm/regexp-macro-assembler-arm.h (13)
  25. deps/v8/src/arm/simulator-arm.h (12)
  26. deps/v8/src/arm/stub-cache-arm.cc (50)
  27. deps/v8/src/bootstrapper.cc (20)
  28. deps/v8/src/builtins.cc (85)
  29. deps/v8/src/code-stubs.cc (32)
  30. deps/v8/src/code-stubs.h (1)
  31. deps/v8/src/codegen.h (6)
  32. deps/v8/src/contexts.h (20)
  33. deps/v8/src/d8.cc (53)
  34. deps/v8/src/d8.h (2)
  35. deps/v8/src/debug-agent.cc (32)
  36. deps/v8/src/debug.cc (42)
  37. deps/v8/src/debug.h (50)
  38. deps/v8/src/elements-kind.cc (134)
  39. deps/v8/src/elements-kind.h (210)
  40. deps/v8/src/elements.cc (400)
  41. deps/v8/src/elements.h (22)
  42. deps/v8/src/factory.cc (5)
  43. deps/v8/src/factory.h (13)
  44. deps/v8/src/flag-definitions.h (3)
  45. deps/v8/src/frames.h (3)
  46. deps/v8/src/func-name-inferrer.h (2)
  47. deps/v8/src/globals.h (3)
  48. deps/v8/src/heap-inl.h (20)
  49. deps/v8/src/heap.cc (70)
  50. deps/v8/src/heap.h (20)
  51. deps/v8/src/hydrogen-instructions.cc (33)
  52. deps/v8/src/hydrogen-instructions.h (164)
  53. deps/v8/src/hydrogen.cc (313)
  54. deps/v8/src/hydrogen.h (3)
  55. deps/v8/src/ia32/assembler-ia32.h (3)
  56. deps/v8/src/ia32/builtins-ia32.cc (9)
  57. deps/v8/src/ia32/code-stubs-ia32.cc (38)
  58. deps/v8/src/ia32/codegen-ia32.cc (4)
  59. deps/v8/src/ia32/debug-ia32.cc (31)
  60. deps/v8/src/ia32/full-codegen-ia32.cc (16)
  61. deps/v8/src/ia32/ic-ia32.cc (38)
  62. deps/v8/src/ia32/lithium-codegen-ia32.cc (163)
  63. deps/v8/src/ia32/lithium-codegen-ia32.h (3)
  64. deps/v8/src/ia32/lithium-ia32.cc (8)
  65. deps/v8/src/ia32/lithium-ia32.h (12)
  66. deps/v8/src/ia32/macro-assembler-ia32.cc (80)
  67. deps/v8/src/ia32/macro-assembler-ia32.h (5)
  68. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (148)
  69. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (13)
  70. deps/v8/src/ia32/simulator-ia32.h (8)
  71. deps/v8/src/ia32/stub-cache-ia32.cc (27)
  72. deps/v8/src/ic.cc (85)
  73. deps/v8/src/ic.h (20)
  74. deps/v8/src/incremental-marking-inl.h (26)
  75. deps/v8/src/incremental-marking.cc (43)
  76. deps/v8/src/incremental-marking.h (15)
  77. deps/v8/src/isolate.h (2)
  78. deps/v8/src/jsregexp.cc (88)
  79. deps/v8/src/jsregexp.h (100)
  80. deps/v8/src/list-inl.h (8)
  81. deps/v8/src/list.h (3)
  82. deps/v8/src/lithium.cc (7)
  83. deps/v8/src/liveedit.cc (60)
  84. deps/v8/src/mark-compact-inl.h (28)
  85. deps/v8/src/mark-compact.cc (224)
  86. deps/v8/src/mark-compact.h (64)
  87. deps/v8/src/messages.js (230)
  88. deps/v8/src/mips/builtins-mips.cc (9)
  89. deps/v8/src/mips/code-stubs-mips.cc (38)
  90. deps/v8/src/mips/codegen-mips.cc (4)
  91. deps/v8/src/mips/debug-mips.cc (4)
  92. deps/v8/src/mips/full-codegen-mips.cc (8)
  93. deps/v8/src/mips/ic-mips.cc (56)
  94. deps/v8/src/mips/lithium-codegen-mips.cc (134)
  95. deps/v8/src/mips/lithium-mips.cc (5)
  96. deps/v8/src/mips/lithium-mips.h (6)
  97. deps/v8/src/mips/macro-assembler-mips.cc (81)
  98. deps/v8/src/mips/macro-assembler-mips.h (5)
  99. deps/v8/src/mips/regexp-macro-assembler-mips.cc (136)
  100. deps/v8/src/mips/regexp-macro-assembler-mips.h (11)

deps/v8/ChangeLog (107)

@ -1,110 +1,3 @@
2012-05-29: Version 3.11.7
Get better function names in stack traces.
Performance and stability improvements on all platforms.
2012-05-24: Version 3.11.6
Fixed RegExp.prototype.toString for incompatible receivers
(issue 1981).
Performance and stability improvements on all platforms.
2012-05-23: Version 3.11.5
Performance and stability improvements on all platforms.
2012-05-22: Version 3.11.4
Some cleanup to common.gypi. This fixes some host/target combinations
that weren't working in the Make build on Mac.
Handle EINTR in socket functions and continue incomplete sends.
(issue 2098)
Fixed python deprecations. (issue 1391)
Made socket send and receive more robust and return 0 on failure.
(Chromium issue 15719)
Fixed GCC 4.7 (C++11) compilation. (issue 2136)
Set '-m32' option for host and target platforms
Performance and stability improvements on all platforms.
2012-05-18: Version 3.11.3
Disable optimization for functions that have scopes that cannot be
reconstructed from the context chain. (issue 2071)
Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
Correctly check for native error objects. (Chromium issue 2138)
Performance and stability improvements on all platforms.
2012-05-16: Version 3.11.2
Revert r11496. (Chromium issue 128146)
Implement map collection for incremental marking. (issue 1465)
Add toString method to CallSite (which describes a frame of the
stack trace).
2012-05-15: Version 3.11.1
Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
Fix freebsd build. (V8 issue 2126)
Performance and stability improvements on all platforms.
2012-05-11: Version 3.11.0
Fixed compose-discard crasher from r11524 (issue 2123).
Activated new global semantics by default. Global variables can
now shadow properties of the global object (ES5.1 erratum).
Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
transitioning (Chromium issue 117409).
Made Error.prototype.name writable again, as required by the spec and
the web (Chromium issue 69187).
Implemented map collection with incremental marking (issue 1465).
Regexp: Fixed overflow in min-match-length calculation
(Chromium issue 126412).
MIPS: Fixed illegal instruction use on Loongson in code for
Math.random() (issue 2115).
Fixed crash bug in VisitChoice (Chromium issue 126272).
Fixed unsigned-Smi check in MappedArgumentsLookup
(Chromium issue 126414).
Fixed LiveEdit for function with no locals (issue 825).
Fixed register clobbering in LoadIC for interceptors
(Chromium issue 125988).
Implemented clearing of CompareICs (issue 2102).
Performance and stability improvements on all platforms.
2012-05-03: Version 3.10.8
Enabled MIPS cross-compilation.

deps/v8/DEPS (27)

@ -1,27 +0,0 @@
# Note: The buildbots evaluate this file with CWD set to the parent
# directory and assume that the root of the checkout is in ./v8/, so
# all paths in here must match this assumption.
deps = {
# Remember to keep the revision in sync with the Makefile.
"v8/build/gyp":
"http://gyp.googlecode.com/svn/trunk@1282",
}
deps_os = {
"win": {
"v8/third_party/cygwin":
"http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
"v8/third_party/python_26":
"http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
}
}
hooks = [
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",
"action": ["python", "v8/build/gyp_v8"],
},
]

deps/v8/Makefile (7)

@ -137,12 +137,6 @@ ENVFILE = $(OUTDIR)/environment
# Target definitions. "all" is the default.
all: $(MODES)
# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
# having been created before.
buildbot:
$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@ -228,7 +222,6 @@ $(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT
GYP_GENERATORS=make \
CC="${ANDROID_TOOL_PREFIX}-gcc" \
CXX="${ANDROID_TOOL_PREFIX}-g++" \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.android $(GYPFLAGS)

deps/v8/SConstruct (17)

@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
'CPPDEFINES': ['V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
'library:shared': {
'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
'CPPDEFINES': ['V8_SHARED']
}
},
'os:freebsd': {
@ -1601,17 +1601,4 @@ except:
pass
def WarnAboutDeprecation():
print """
#######################################################
# WARNING: Building V8 with SCons is deprecated and #
# will not work much longer. Please switch to using #
# the GYP-based build now. Instructions are at #
# http://code.google.com/p/v8/wiki/BuildingWithGYP. #
#######################################################
"""
WarnAboutDeprecation()
import atexit
atexit.register(WarnAboutDeprecation)
Build()

deps/v8/build/common.gypi (131)

@ -110,6 +110,13 @@
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['OS!="mac"', {
# TODO(mark): The OS!="mac" conditional is temporary. It can be
# removed once the Mac Chromium build stops setting target_arch to
# ia32 and instead sets it to mac. Other checks in this file for
# OS=="mac" can be removed at that time as well. This can be cleaned
# up once http://crbug.com/44205 is fixed.
'conditions': [
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
@ -145,13 +152,23 @@
'USE_EABI_HARDFLOAT=0',
],
}],
# The ARM assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64" or OS=="android"', {
'target_conditions': [
['_toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}],
],
}],
],
}], # v8_target_arch=="arm"
}],
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
}],
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
@ -206,21 +223,38 @@
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
# The MIPS assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64"', {
'target_conditions': [
['_toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}],
],
}],
],
}], # v8_target_arch=="mips"
}],
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
],
}],
],
}, { # Section for OS=="mac".
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {
'ARCHS': [ 'x86_64' ],
},
'msvs_settings': {
'VCLinkerTool': {
'StackReserveSize': '2097152',
},
},
}], # v8_target_arch=="x64"
'ARCHS': ['i386'],
}
}],
['target_arch=="x64"', {
'xcode_settings': {
'ARCHS': ['x86_64'],
}
}],
],
}],
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
@ -238,10 +272,6 @@
'defines': [
'WIN32',
],
'msvs_configuration_attributes': {
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
}],
['OS=="win" and v8_enable_prof==1', {
'msvs_settings': {
@ -253,48 +283,21 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
}],
], # conditions
}],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mips")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
[ 'v8_target_arch!="x64"', {
# Pass -m32 to the compiler iff it understands the flag.
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
'm32flag': '<!((echo | $(echo ${CXX:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
],
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
], # conditions
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
], # conditions
'configurations': {
@ -319,11 +322,21 @@
},
'VCLinkerTool': {
'LinkIncremental': '2',
# For future reference, the stack size needs to be increased
# when building for Windows 64-bit, otherwise some test cases
# can cause stack overflow.
# 'StackReserveSize': '297152',
},
},
'conditions': [
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@ -351,6 +364,12 @@
}],
],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3', # -O3
@ -363,6 +382,11 @@
},
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': '2',
@ -383,7 +407,12 @@
'VCLinkerTool': {
'LinkIncremental': '1',
'OptimizeReferences': '2',
'OptimizeForWindows98': '1',
'EnableCOMDATFolding': '2',
# For future reference, the stack size needs to be
# increased when building for Windows 64-bit, otherwise
# some test cases can cause stack overflow.
# 'StackReserveSize': '297152',
},
},
}], # OS=="win"

deps/v8/build/gyp_v8 (36)

@ -38,11 +38,6 @@ import sys
script_dir = os.path.dirname(__file__)
v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
if __name__ == '__main__':
os.chdir(v8_root)
script_dir = os.path.dirname(__file__)
v8_root = '.'
sys.path.insert(0, os.path.join(v8_root, 'tools'))
import utils
@ -98,7 +93,7 @@ def additional_include_files(args=[]):
result.append(path)
# Always include standalone.gypi
AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
AddInclude(os.path.join(script_dir, 'standalone.gypi'))
# Optionally add supplemental .gypi files if present.
supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@ -140,10 +135,7 @@ if __name__ == '__main__':
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
# Note that this must not start with "./" or things break.
# So we rely on having done os.chdir(v8_root) above and use the
# relative path.
args.append(os.path.join('build', 'all.gyp'))
args.append(os.path.join(script_dir, 'all.gyp'))
args.extend(['-I' + i for i in additional_include_files(args)])
@ -164,6 +156,28 @@ if __name__ == '__main__':
# Generate for the architectures supported on the given platform.
gyp_args = list(args)
target_arch = None
for p in gyp_args:
if p.find('-Dtarget_arch=') == 0:
target_arch = p
if target_arch is None:
gyp_args.append('-Dtarget_arch=ia32')
if utils.GuessOS() == 'linux':
gyp_args.append('-S.ia32')
run_gyp(gyp_args)
if utils.GuessOS() == 'linux':
gyp_args.append('--generator-output=out')
gyp_args = list(args)
gyp_args.append('-Dtarget_arch=x64')
gyp_args.append('-S.x64')
run_gyp(gyp_args)
gyp_args = list(args)
gyp_args.append('-Dv8_target_arch=arm')
gyp_args.append('-S.arm')
run_gyp(gyp_args)
gyp_args = list(args)
gyp_args.append('-Dv8_target_arch=mips')
gyp_args.append('-S.mips')
run_gyp(gyp_args)

deps/v8/build/standalone.gypi (12)

@ -37,9 +37,8 @@
'variables': {
'variables': {
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
OS=="netbsd" or OS=="mac"', {
# This handles the Unix platforms we generally deal with.
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
# This handles the Linux platforms we generally deal with.
# Anything else gets passed through, which probably won't work
# very well; such hosts should pass an explicit target_arch
# to gyp.
@ -47,8 +46,7 @@
'<!(uname -m | sed -e "s/i.86/ia32/;\
s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
}, {
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and
# OS!="netbsd" and OS!="mac"
# OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
'host_arch%': 'ia32',
}],
],
@ -171,9 +169,6 @@
},
}], # OS=="win"
['OS=="mac"', {
'xcode_settings': {
'SYMROOT': '<(DEPTH)/xcodebuild',
},
'target_defaults': {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
@ -193,7 +188,6 @@
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
'MACOSX_DEPLOYMENT_TARGET': '10.4', # -mmacosx-version-min=10.4
'PREBINDING': 'NO', # No -Wl,-prebind
'SYMROOT': '<(DEPTH)/xcodebuild',
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fno-strict-aliasing',

deps/v8/include/v8.h (8)

@ -62,13 +62,11 @@
#else // _WIN32
// Setup for Linux shared library export.
// Setup for Linux shared library export. There is no need to distinguish
// between building or using the V8 shared library, but we should not
// export symbols when we are building a static library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
#else
#define V8EXPORT
#endif
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
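For orientation, the v8.h hunk above drops the extra BUILDING_V8_SHARED test, so after the rollback V8EXPORT keys only off V8_SHARED. A minimal sketch of how the restored macro reads on GCC 4+ follows; HypotheticalApiClass is illustrative and not part of the commit:

    // Restored 3.10-style export macro: symbols are visible whenever the
    // shared-library configuration (V8_SHARED) is in use, with no separate
    // BUILDING_V8_SHARED distinction.
    #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
    #define V8EXPORT __attribute__ ((visibility("default")))
    #else
    #define V8EXPORT
    #endif

    // Hypothetical consumer: a class tagged this way keeps default visibility
    // even when the library is built with -fvisibility=hidden.
    class V8EXPORT HypotheticalApiClass {
     public:
      int Version() const { return 3; }
    };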

deps/v8/src/SConscript (1)

@ -68,7 +68,6 @@ SOURCES = {
diy-fp.cc
dtoa.cc
elements.cc
elements-kind.cc
execution.cc
factory.cc
flags.cc

deps/v8/src/api.cc (37)

@ -5040,7 +5040,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!self->HasFastObjectElements()) {
if (!self->HasFastElements()) {
return Local<Object>();
}
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@ -6045,6 +6045,13 @@ int HeapGraphNode::GetSelfSize() const {
}
int HeapGraphNode::GetRetainedSize() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->retained_size();
}
int HeapGraphNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@ -6056,7 +6063,29 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->children()[index]);
&ToInternal(this)->children()[index]);
}
int HeapGraphNode::GetRetainersCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
return ToInternal(this)->retainers().length();
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->retainers()[index]);
}
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
}
@ -6128,7 +6157,7 @@ const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
int HeapSnapshot::GetNodesCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
return ToInternal(this)->entries().length();
return ToInternal(this)->entries()->length();
}
@ -6136,7 +6165,7 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
&ToInternal(this)->entries().at(index));
ToInternal(this)->entries()->at(index));
}
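The api.cc hunks above re-expose the 3.10 heap-profiler accessors (GetRetainedSize, GetRetainersCount, GetRetainer, GetDominatorNode) and switch entries()/children() back to the list-based plumbing. A hedged usage sketch of the restored public API follows; only the HeapGraphNode method names come from the hunks, while the DumpNode helper and the traversal are hypothetical:

    #include "v8-profiler.h"

    // Walk one snapshot node using the accessors this rollback restores.
    void DumpNode(const v8::HeapGraphNode* node) {
      int child_count = node->GetChildrenCount();
      for (int i = 0; i < child_count; ++i) {
        const v8::HeapGraphEdge* edge = node->GetChild(i);  // outgoing edge
        (void)edge;
      }
      int retainer_count = node->GetRetainersCount();       // restored accessor
      for (int i = 0; i < retainer_count; ++i) {
        const v8::HeapGraphEdge* retainer = node->GetRetainer(i);  // restored
        (void)retainer;
      }
      const v8::HeapGraphNode* dominator = node->GetDominatorNode();  // restored
      (void)dominator;
    }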

deps/v8/src/api.h (4)

@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
ASSERT(value()->HasFastObjectElements());
ASSERT(value()->HasFastElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
ASSERT(value_->HasFastObjectElements());
ASSERT(value_->HasFastElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}

deps/v8/src/arm/builtins-arm.cc (9)

@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -208,8 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@ -441,10 +440,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,

35
deps/v8/src/arm/code-stubs-arm.cc

@ -4824,32 +4824,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
const int kRegExpExecuteArguments = 8;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
// Argument 9 (sp[20]): Pass current isolate address.
// Argument 8 (sp[16]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ str(r0, MemOperand(sp, 5 * kPointerSize));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
// Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
// Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(r0, Operand(0));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@ -4898,9 +4893,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
@ -7102,8 +7095,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@ -7366,9 +7359,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(r2, r5, &double_elements);
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
__ CheckFastSmiElements(r2, r5, &fast_elements);
__ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
// Store into the array literal requires a elements transition. Call into
// the runtime.
@ -7380,7 +7373,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@ -7391,8 +7384,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));

4
deps/v8/src/arm/codegen-arm.cc

@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value

deps/v8/src/arm/debug-arm.cc (4)

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -125,8 +125,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
#define __ ACCESS_MASM(masm)

deps/v8/src/arm/full-codegen-arm.cc (7)

@ -1701,7 +1701,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1722,7 +1722,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@ -1750,7 +1751,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
if (IsFastObjectElementsKind(constant_elements_kind)) {
if (constant_elements_kind == FAST_ELEMENTS) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));

53
deps/v8/src/arm/ic-arm.cc

@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
void PatchInlinedSmiCode(Address address) {
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@ -1729,31 +1729,34 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
// This is patching a "jump if not smi" site to be active.
// Changing
// cmp rx, rx
// b eq/ne, <target>
// b eq, <target>
// to
// tst rx, #kSmiTagMask
// b ne/eq, <target>
// and vice-versa to be disabled again.
// b ne, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
if (check == ENABLE_INLINED_SMI_CHECK) {
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
patcher.masm()->tst(reg, Operand(kSmiTagMask));
} else {
ASSERT(check == DISABLE_INLINED_SMI_CHECK);
ASSERT(Assembler::IsTstImmediate(instr_at_patch));
patcher.masm()->cmp(reg, reg);
}
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
// This is patching a "jump if smi" site to be active.
// Changing
// cmp rx, rx
// b ne, <target>
// to
// tst rx, #kSmiTagMask
// b eq, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
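For context on the patching above: the site toggles between a dummy "cmp rx, rx" and a real smi-tag test. A rough sketch, not from the commit, of the check that the patched "tst rx, #kSmiTagMask" performs, assuming V8's 32-bit smi tagging where a smi has its low bit clear:

    #include <cstdint>

    // Assumed 32-bit tagging scheme: heap-object pointers carry a set low bit,
    // small integers (smis) carry a clear one, so masking with kSmiTagMask
    // separates the two. IsSmiValue is a hypothetical helper for illustration.
    static const std::intptr_t kSmiTag = 0;
    static const std::intptr_t kSmiTagMask = 1;

    inline bool IsSmiValue(std::intptr_t raw) {
      return (raw & kSmiTagMask) == kSmiTag;
    }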

deps/v8/src/arm/lithium-arm.cc (5)

@ -2082,9 +2082,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =

deps/v8/src/arm/lithium-arm.h (9)

@ -1236,7 +1236,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1253,13 +1252,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@ -1273,7 +1272,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1742,7 +1740,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1765,7 +1762,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -1810,7 +1806,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};

deps/v8/src/arm/lithium-codegen-arm.cc (134)

@ -2587,38 +2587,42 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(al, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
if (map_count == 0) {
ASSERT(instr->hydrogen()->need_generic());
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
Label done;
__ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
for (int i = 0; i < map_count - 1; ++i) {
Handle<Map> map = instr->hydrogen()->types()->at(i);
__ cmp(scratch, Operand(map));
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
} else {
Label next;
__ cmp(scratch, Operand(map));
__ b(ne, &next);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ b(&done);
__ bind(&next);
}
}
if (need_generic) {
Handle<Map> map = instr->hydrogen()->types()->last();
__ cmp(scratch, Operand(map));
if (instr->hydrogen()->need_generic()) {
Label generic;
__ b(ne, &generic);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ b(&done);
__ bind(&generic);
__ mov(r2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
DeoptimizeIf(ne, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
}
__ bind(&done);
}
}
@ -2696,10 +2700,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ cmp(scratch, Operand(GetInitialFastElementsKind()));
__ b(lt, &fail);
__ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ b(le, &done);
__ cmp(scratch, Operand(FAST_ELEMENTS));
__ b(eq, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
@ -2746,9 +2748,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ ldr(result, FieldMemOperand(scratch, offset));
__ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2780,21 +2780,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
? Operand(((constant_key + instr->additional_index()) << shift_size) +
? Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ vldr(result, elements, 0);
}
@ -2816,33 +2813,26 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << shift_size)
? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), additional_offset);
__ vldr(result.low(), scratch0(), 0);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
__ vldr(result, scratch0(), 0);
}
} else {
Register result = ToRegister(instr->result());
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset)
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@ -2870,12 +2860,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3743,16 +3730,10 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
if (instr->additional_index() != 0) {
__ add(scratch,
scratch,
Operand(instr->additional_index() << kPointerSizeLog2));
}
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@ -3794,7 +3775,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
Operand operand = key_is_constant
? Operand((constant_key << shift_size) +
? Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@ -3812,7 +3793,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
vs);
}
__ vstr(value, scratch, instr->additional_index() << shift_size);
__ vstr(value, scratch, 0);
}
@ -3833,33 +3814,25 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key << shift_size)
Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), additional_offset);
__ vstr(double_scratch0().low(), scratch0(), 0);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), additional_offset);
__ vstr(value, scratch0(), 0);
}
} else {
Register value(ToRegister(instr->value()));
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer,
((constant_key + instr->additional_index())
<< shift_size))
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@ -3878,10 +3851,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3918,22 +3888,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
@ -4707,9 +4675,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@ -4860,11 +4827,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));

deps/v8/src/arm/macro-assembler-arm.cc (80)

@ -1868,12 +1868,10 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
@ -1881,25 +1879,22 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
b(hi, fail);
}
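The STATIC_ASSERT pairs above give the clearest picture of what the rollback does to the ElementsKind numbering: 3.11's packed/holey split of the fast kinds collapses back to 3.10's two fast kinds. A sketch of the two prefixes as implied by those asserts; the enum and enumerator names carry _3_11/_3_10 suffixes purely so both can appear side by side, and only the asserted values are certain:

    // Fast ElementsKind prefix being removed (v8 3.11), per the asserts above.
    enum ElementsKind_3_11 {
      FAST_SMI_ELEMENTS_3_11 = 0,        // packed smis
      FAST_HOLEY_SMI_ELEMENTS_3_11 = 1,  // smis, may contain holes
      FAST_ELEMENTS_3_11 = 2,            // packed tagged values
      FAST_HOLEY_ELEMENTS_3_11 = 3       // tagged values, may contain holes
    };

    // Fast ElementsKind prefix being restored (v8 3.10).
    enum ElementsKind_3_10 {
      FAST_SMI_ONLY_ELEMENTS_3_10 = 0,   // smis only
      FAST_ELEMENTS_3_10 = 1             // any tagged value
    };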
@ -2002,17 +1997,22 @@ void MacroAssembler::CompareMap(Register obj,
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
cmp(scratch, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind, NULL);
if (!current_map) break;
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(current_map)));
cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
}
}
}
@ -2865,38 +2865,28 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
ldr(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmp(map_in_out, scratch);
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
ldr(map_in_out, FieldMemOperand(scratch, offset));
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
@ -3748,7 +3738,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap) {
masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.

deps/v8/src/arm/macro-assembler-arm.h (5)

@ -512,8 +512,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out,
bool can_have_holes);
Register map_out);
void LoadGlobalFunction(int index, Register function);
@ -803,7 +802,7 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);

deps/v8/src/arm/regexp-macro-assembler-arm.cc (144)

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -43,31 +43,28 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r4 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
* - r8 : Points to tip of backtrack stack
* - r8 : points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
* - r13/sp : Points to tip of C stack.
* - r13/sp : points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
* - fp[56] Isolate* isolate (address of the current isolate)
* - fp[52] direct_call (if 1, direct call from JavaScript code,
* - fp[52] Isolate* isolate (Address of the current isolate)
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[48] stack_area_base (high end of the memory area to use as
* - fp[44] stack_area_base (High end of the memory area to use as
* backtracking stack).
* - fp[44] capture array size (may fit multiple sets of matches)
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
@ -75,17 +72,16 @@ namespace internal {
* - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
* - fp[-4] end of input (address of end of string).
* - fp[-8] start of input (address of first character in string).
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
* - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-28] At start (if 1, we are starting at the start of the
* - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-32] register 0 (Only positions must be stored in the first
* - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
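A condensed sketch of how the fp-relative slots listed above chain together in the rolled-back layout, the one without the success-counter slot (kPointerSize is 4 on ARM; the authoritative definitions appear in the regexp-macro-assembler-arm.h hunk further down):

static const int kFramePointer       = 0;
static const int kInputEnd           = kFramePointer       - kPointerSize;  // fp[-4]
static const int kInputStart         = kInputEnd           - kPointerSize;  // fp[-8]
static const int kStartIndex         = kInputStart         - kPointerSize;  // fp[-12]
static const int kInputString        = kStartIndex         - kPointerSize;  // fp[-16]
static const int kInputStartMinusOne = kInputString        - kPointerSize;  // fp[-20]
static const int kAtStart            = kInputStartMinusOne - kPointerSize;  // fp[-24]
static const int kRegisterZero       = kAtStart            - kPointerSize;  // fp[-28]

Each local is then read or written with an fp-relative access, for example __ ldr(r0, MemOperand(frame_pointer(), kAtStart)), as the CheckAtStart hunks below do.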
@ -201,9 +197,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(ne, &not_at_start);
BranchOrBacktrack(eq, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@ -216,9 +212,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(ne, on_not_at_start);
BranchOrBacktrack(eq, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
@ -659,7 +655,6 @@ void RegExpMacroAssemblerARM::Fail() {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label return_r0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@ -683,9 +678,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(r0, Operand(0, RelocInfo::NONE));
__ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@ -704,13 +698,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
__ jmp(&return_r0);
__ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &return_r0);
__ b(ne, &exit_label_);
__ bind(&stack_ok);
@ -731,26 +725,16 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(r1, Operand(0, RelocInfo::NONE));
__ b(ne, &load_char_start_regexp);
__ mov(current_character(), Operand('\n'), LeaveCC, eq);
__ jmp(&start_regexp);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ cmp(r1, Operand(0));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
if (num_saved_registers_ > 8) {
// Address of register 0.
__ add(r1, frame_pointer(), Operand(kRegisterZero));
__ mov(r2, Operand(num_saved_registers_));
@ -759,17 +743,23 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
__ sub(r2, r2, Operand(1), SetCC);
__ b(ne, &init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
__ str(r0, register_location(i));
}
}
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
// Load previous char as initial value of current character register.
Label at_start;
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ mov(current_character(), Operand('\n'));
__ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@ -796,10 +786,6 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
if (global()) {
// Keep capture start in r4 for the zero-length check later.
__ mov(r4, r2);
}
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@ -811,54 +797,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
if (global()) {
// Restart matching if the regular expression is flagged as global.
__ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
__ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
__ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ add(r0, r0, Operand(1));
__ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ sub(r1, r1, Operand(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmp(r1, Operand(num_saved_registers_));
__ b(lt, &return_r0);
__ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
__ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r0 to initialize registers with its value in the next run.
__ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Special case for zero-length matches.
// r4: capture start index
__ cmp(current_input_offset(), r4);
// Not a zero-length match, restart.
__ b(ne, &load_char_start_regexp);
// Offset from the end is zero if we already reached the end.
__ cmp(current_input_offset(), Operand(0));
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
__ add(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
__ b(&load_char_start_regexp);
} else {
__ mov(r0, Operand(SUCCESS));
}
}
// Exit and return r0
__ bind(&exit_label_);
if (global()) {
__ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_r0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@ -880,7 +822,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &return_r0);
__ b(ne, &exit_label_);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@ -917,7 +859,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
__ jmp(&return_r0);
__ jmp(&exit_label_);
}
CodeDesc code_desc;
@ -1072,9 +1014,8 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
}
bool RegExpMacroAssemblerARM::Succeed() {
void RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
return global();
}
@ -1366,9 +1307,8 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
// r4 is not being used to store the capture start index at this point.
__ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
offset = r4;
__ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
offset = r0;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.

13
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -113,7 +113,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@ -137,8 +137,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@ -150,10 +149,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kInputStartMinusOne = kInputString - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;

12
deps/v8/src/arm/simulator-arm.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -49,16 +49,16 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, int, Address, int, Isolate*);
void*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
p0, p1, p2, p3, NULL, p4, p5, p6, p7))
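The parameter drop above mirrors the rolled-back matcher signature, which has one argument fewer. An illustrative call site (argument names are assumptions): the macro forwards eight arguments and wedges NULL into the fifth C parameter, the slot an ExitFrame would otherwise use for a return address in a native call.

int result = CALL_GENERATED_REGEXP_CODE(
    code_entry,        // generated code entry point
    subject,           // String*      subject string
    start_offset,      // int          index to start matching at
    input_start,       // const byte*  first character to scan
    input_end,         // const byte*  one past the last character
    output_registers,  // int*         capture registers for the result
    stack_base,        // Address      high end of the backtrack stack
    direct_call,       // int          1 when called directly from JS code
    isolate);          // Isolate*     current isolate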
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@ -401,9 +401,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
Simulator::current(Isolate::Current())->Call( \
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \

50
deps/v8/src/arm/stub-cache-arm.cc

@ -1581,29 +1581,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
__ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
// edx: receiver
// r3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
r7,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@ -3385,11 +3372,8 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3513,11 +3497,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3857,11 +3838,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3924,11 +3902,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -4067,11 +4042,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -4253,7 +4225,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@ -4281,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
if (IsFastSmiElementsKind(elements_kind)) {
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -4291,7 +4263,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
ASSERT(elements_kind == FAST_ELEMENTS);
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));

20
deps/v8/src/bootstrapper.cc

@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
object_function_map->set_instance_descriptors(
heap->empty_descriptor_array());
object_function_map->
set_instance_descriptors(heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@ -516,10 +516,12 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
empty_function_map->set_prototype(
global_context()->object_function()->prototype());
empty_function->set_map(*empty_function_map);
Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
function_without_prototype_map);
empty_fm->set_instance_descriptors(
function_without_prototype_map->instance_descriptors());
empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm);
return empty_function;
}
@ -1092,7 +1094,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
ASSERT(result->HasFastObjectElements());
ASSERT(result->HasFastElements());
#endif
}
@ -1185,7 +1187,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
ASSERT(result->HasFastObjectElements());
ASSERT(result->HasFastElements());
#endif
}
@ -1635,7 +1637,7 @@ bool Genesis::InstallNatives() {
array_function->initial_map()->CopyDropTransitions();
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return false;
new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
new_map->set_elements_kind(FAST_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.

85
deps/v8/src/builtins.cc

@ -200,12 +200,9 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!global_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
FixedArray::cast(global_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
!global_context->object_js_array_map()->IsUndefined()) {
array->set_map(Map::cast(global_context->object_js_array_map()));
}
}
} else {
@ -225,13 +222,6 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
}
ElementsKind elements_kind = array->GetElementsKind();
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe_array =
array->TransitionElementsKind(elements_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
// We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj));
@ -260,7 +250,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Allocate an appropriately typed elements array.
MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
if (IsFastDoubleElementsKind(elements_kind)) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements);
} else {
@ -271,15 +261,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
case FAST_SMI_ONLY_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
}
break;
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@ -289,7 +277,6 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
}
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
@ -425,7 +412,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
if (args == NULL || array->HasFastElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
@ -435,7 +422,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
if (args == NULL || array->HasFastElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
@ -529,8 +516,8 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@ -601,7 +588,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@ -643,7 +630,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -665,8 +652,8 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@ -695,7 +682,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastSmiOrObjectElements() ||
if (!array->HasFastTypeElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -711,7 +698,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastSmiOrObjectElements();
&& JSObject::cast(receiver)->HasFastTypeElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -776,9 +763,9 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
CopyObjectToObjectElements(elms, elements_kind, k,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()),
elements_kind, 0, result_len);
FAST_ELEMENTS, 0, result_len);
return result_array;
}
@ -799,7 +786,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
ASSERT(array->HasFastTypeElements());
int len = Smi::cast(array->length())->value();
@ -850,9 +837,9 @@ BUILTIN(ArraySplice) {
{
// Fill newly created array.
CopyObjectToObjectElements(elms, elements_kind, actual_start,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()),
elements_kind, 0, actual_delete_count);
FAST_ELEMENTS, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@ -901,13 +888,12 @@ BUILTIN(ArraySplice) {
{
// Copy the part before actual_start as is.
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0,
new_elms, kind, 0, actual_start);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(elms, kind,
CopyObjectToObjectElements(elms, FAST_ELEMENTS,
actual_start + actual_delete_count,
new_elms, kind,
new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy);
}
@ -954,12 +940,11 @@ BUILTIN(ArrayConcat) {
// and calculating total length.
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind();
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
!JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@ -976,20 +961,10 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
if (!JSArray::cast(arg)->HasFastSmiElements()) {
if (IsFastSmiElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
elements_kind = FAST_ELEMENTS;
}
}
}
if (JSArray::cast(arg)->HasFastHoleyElements()) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
}
// Allocate result.
JSArray* result_array;
@ -1007,8 +982,8 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(elms, elements_kind, 0,
result_elms, elements_kind,
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
start_pos, len);
start_pos += len;
}

32
deps/v8/src/code-stubs.cc

@ -262,13 +262,10 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case EXTERNAL_BYTE_ELEMENTS:
@ -295,9 +292,7 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS: {
case FAST_SMI_ONLY_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
@ -305,7 +300,6 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
}
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
grow_mode_);
@ -436,32 +430,24 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (IsFastSmiOrObjectElementsKind(to_)) {
if (IsFastSmiOrObjectElementsKind(from_)) {
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(!IsFastSmiElementsKind(to_));
if (to_ == FAST_ELEMENTS) {
if (from_ == FAST_SMI_ONLY_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
} else if (from_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
to_,
FAST_ELEMENTS,
grow_mode_);
} else if (IsFastSmiElementsKind(from_) &&
IsFastDoubleElementsKind(to_)) {
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
} else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
grow_mode_);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else {
UNREACHABLE();
}

1
deps/v8/src/code-stubs.h

@ -498,7 +498,6 @@ class ICCompareStub: public CodeStub {
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(state_);
code->set_compare_operation(op_);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }

6
deps/v8/src/codegen.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
static void GenerateSmiOnlyToObject(MacroAssembler* masm);
static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private:

20
deps/v8/src/contexts.h

@ -106,7 +106,9 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@ -246,7 +248,9 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
JS_ARRAY_MAPS_INDEX,
SMI_JS_ARRAY_MAP_INDEX,
DOUBLE_JS_ARRAY_MAP_INDEX,
OBJECT_JS_ARRAY_MAP_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@ -369,6 +373,18 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
static int GetContextMapIndexFromElementsKind(
ElementsKind elements_kind) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
} else if (elements_kind == FAST_ELEMENTS) {
return Context::OBJECT_JS_ARRAY_MAP_INDEX;
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
return Context::SMI_JS_ARRAY_MAP_INDEX;
}
}
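Both array-map lookup schemes touched by this rollback are visible here and in the builtins.cc hunk above; a condensed sketch of the difference, with illustrative variable names (not a complete API):

// Rolled-back scheme: one global-context slot per fast elements kind,
// selected through GetContextMapIndexFromElementsKind.
int slot = Context::GetContextMapIndexFromElementsKind(FAST_ELEMENTS);
Map* array_map = Map::cast(global_context->get(slot));

// Scheme being removed: a single js_array_maps FixedArray holding one map
// per kind, indexed by the ElementsKind value (as the builtins.cc hunk does
// with TERMINAL_FAST_ELEMENTS_KIND).
FixedArray* maps = FixedArray::cast(global_context->js_array_maps());
Map* array_map_too = Map::cast(maps->get(FAST_ELEMENTS));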
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \

53
deps/v8/src/d8.cc

@ -26,8 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Defined when linking against shared lib on Windows.
#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows.
#define V8_SHARED
#endif
@ -316,8 +315,8 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
}
const char kArrayBufferMarkerPropName[] = "_is_array_buffer_";
const char kArrayBufferReferencePropName[] = "_array_buffer_ref_";
const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
static const int kExternalArrayAllocationHeaderSize = 2;
@ -354,11 +353,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
Local<Value> length_value = (args.Length() < 3)
? (first_arg_is_array_buffer
? args[0]->ToObject()->Get(String::New("byteLength"))
? args[0]->ToObject()->Get(String::New("length"))
: args[0])
: args[2];
size_t byteLength = convertToUint(length_value, &try_catch);
size_t length = byteLength;
size_t length = convertToUint(length_value, &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
void* data = NULL;
@ -370,7 +368,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
data = derived_from->GetIndexedPropertiesExternalArrayData();
size_t array_buffer_length = convertToUint(
derived_from->Get(String::New("byteLength")),
derived_from->Get(String::New("length")),
&try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
@ -453,20 +451,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
array->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data) + offset, type,
static_cast<int>(length));
array->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(byteLength)), ReadOnly);
if (!is_array_buffer_construct) {
array->Set(String::New("length"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
array->Set(String::New("byteOffset"),
Int32::New(static_cast<int32_t>(offset)), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"),
Int32::New(static_cast<int32_t>(element_size)));
// We currently support 'buffer' property only if constructed from a buffer.
if (first_arg_is_array_buffer) {
array->Set(String::New("buffer"), args[0], ReadOnly);
}
}
return array;
}
@ -834,8 +822,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("readbuffer"),
FunctionTemplate::New(ReadBuffer));
global_template->Set(String::New("readbinary"),
FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readline"),
FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@ -1054,29 +1042,20 @@ static char* ReadChars(const char* name, int* size_out) {
}
Handle<Value> Shell::ReadBuffer(const Arguments& args) {
Handle<Value> Shell::ReadBinary(const Arguments& args) {
String::Utf8Value filename(args[0]);
int length;
int size;
if (*filename == NULL) {
return ThrowException(String::New("Error loading file"));
}
char* data = ReadChars(*filename, &length);
if (data == NULL) {
char* chars = ReadChars(*filename, &size);
if (chars == NULL) {
return ThrowException(String::New("Error reading file"));
}
Handle<Object> buffer = Object::New();
buffer->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
buffer->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data), kExternalUnsignedByteArray, length);
buffer->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
// We skip checking the string for UTF8 characters and use it raw as
// backing store for the external string with 8-bit characters.
BinaryResource* resource = new BinaryResource(chars, size);
return String::NewExternal(resource);
}

2
deps/v8/src/d8.h

@ -307,7 +307,7 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
static Handle<Value> ReadBinary(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin();

32
deps/v8/src/debug-agent.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -247,7 +247,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
while (!(c == '\n' && prev_c == '\r')) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received == 0) {
if (received <= 0) {
PrintF("Error %d\n", Socket::LastError());
return SmartArrayPointer<char>();
}
@ -323,41 +323,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
bool ok;
int len;
int r;
// Send the header.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Type: connect\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"V8-Version: %s\r\n", v8::V8::GetVersion());
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Protocol-Version: 1\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
if (embedding_host != NULL) {
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Embedding-Host: %s\r\n", embedding_host);
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
}
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: 0\r\n", kContentLength);
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
// No body for connect message.
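Assembled in order, the headers built above go over the socket roughly as the block below, each line terminated with \r\n and the whole header closed by an empty line (version and host values are illustrative; kContentLength names the Content-Length key):

Type: connect
V8-Version: 3.10.8.13
Protocol-Version: 1
Embedding-Host: <embedder-supplied name, omitted when NULL>
Content-Length: 0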
@ -454,7 +454,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
if (received == 0) {
if (received <= 0) {
return total_received;
}
total_received += received;

42
deps/v8/src/debug.cc

@ -892,16 +892,6 @@ void Debug::Iterate(ObjectVisitor* v) {
}
void Debug::PutValuesOnStackAndDie(int start,
Address c_entry_fp,
Address last_fp,
Address larger_fp,
int count,
int end) {
OS::Abort();
}
Object* Debug::Break(Arguments args) {
Heap* heap = isolate_->heap();
HandleScope scope(isolate_);
@ -994,34 +984,11 @@ Object* Debug::Break(Arguments args) {
// Count frames until target frame
int count = 0;
JavaScriptFrameIterator it(isolate_);
while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
count++;
it.Advance();
}
// Catch the cases that would lead to crashes and capture
// - C entry FP at which to start stack crawl.
// - FP of the frame at which we plan to stop stepping out (last FP).
// - current FP that's larger than last FP.
// - Counter for the number of steps to step out.
if (it.done()) {
// We crawled the entire stack, never reaching last_fp_.
PutValuesOnStackAndDie(0xBEEEEEEE,
frame->fp(),
thread_local_.last_fp_,
NULL,
count,
0xFEEEEEEE);
} else if (it.frame()->fp() != thread_local_.last_fp_) {
// We crawled over last_fp_, without getting a match.
PutValuesOnStackAndDie(0xBEEEEEEE,
frame->fp(),
thread_local_.last_fp_,
it.frame()->fp(),
count,
0xFEEEEEEE);
}
// If we found original frame
if (it.frame()->fp() == thread_local_.last_fp_) {
if (step_count > 1) {
@ -2260,13 +2227,6 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
}
const int Debug::FramePaddingLayout::kInitialSize = 1;
// Any even value bigger than kInitialSize as needed for stack scanning.
const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == debug_context()->global();
}

50
deps/v8/src/debug.h

@ -232,12 +232,6 @@ class Debug {
void PreemptionWhileInDebugger();
void Iterate(ObjectVisitor* v);
NO_INLINE(void PutValuesOnStackAndDie(int start,
Address c_entry_fp,
Address last_fp,
Address larger_fp,
int count,
int end));
Object* Break(Arguments args);
void SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object,
@ -463,50 +457,6 @@ class Debug {
// Architecture-specific constant.
static const bool kFrameDropperSupported;
/**
* Defines layout of a stack frame that supports padding. This is a regular
* internal frame that has a flexible stack structure. LiveEdit can shift
* its lower part up the stack, taking up the 'padding' space when additional
* stack memory is required.
* Such frame is expected immediately above the topmost JavaScript frame.
*
* Stack Layout:
* --- Top
* LiveEdit routine frames
* ---
* C frames of debug handler
* ---
* ...
* ---
* An internal frame that has n padding words:
* - any number of words as needed by code -- upper part of frame
* - padding size: a Smi storing n -- current size of padding
* - padding: n words filled with kPaddingValue in form of Smi
* - 3 context/type words of a regular InternalFrame
* - fp
* ---
* Topmost JavaScript frame
* ---
* ...
* --- Bottom
*/
class FramePaddingLayout : public AllStatic {
public:
// Architecture-specific constant.
static const bool kIsSupported;
// A size of frame base including fp. Padding words starts right above
// the base.
static const int kFrameBaseSize = 4;
// A number of words that should be reserved on stack for the LiveEdit use.
// Normally equals 1. Stored on stack in form of Smi.
static const int kInitialSize;
// A value that padding words are filled with (in form of Smi). Going
// bottom-top, the first word not having this value is a counter word.
static const int kPaddingValue;
};
private:
explicit Debug(Isolate* isolate);
~Debug();

134
deps/v8/src/elements-kind.cc

@ -1,134 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "elements-kind.h"
#include "api.h"
#include "elements.h"
#include "objects.h"
namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}
ElementsKind GetInitialFastElementsKind() {
if (FLAG_packed_arrays) {
return FAST_SMI_ELEMENTS;
} else {
return FAST_HOLEY_SMI_ELEMENTS;
}
}
struct InitializeFastElementsKindSequence {
static void Construct(
ElementsKind** fast_elements_kind_sequence_ptr) {
ElementsKind* fast_elements_kind_sequence =
new ElementsKind[kFastElementsKindCount];
*fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
}
};
static LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
ASSERT(sequence_number >= 0 &&
sequence_number < kFastElementsKindCount);
return fast_elements_kind_sequence.Get()[sequence_number];
}
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
return i;
}
}
UNREACHABLE();
return 0;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
int index =
GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
elements_kind = GetFastElementsKindFromSequenceIndex(index);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
}
UNREACHABLE();
return TERMINAL_FAST_ELEMENTS_KIND;
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS;
case FAST_HOLEY_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS;
case FAST_DOUBLE_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS &&
to_kind != FAST_DOUBLE_ELEMENTS;
case FAST_HOLEY_DOUBLE_ELEMENTS:
return to_kind == FAST_ELEMENTS ||
to_kind == FAST_HOLEY_ELEMENTS;
case FAST_ELEMENTS:
return to_kind == FAST_HOLEY_ELEMENTS;
case FAST_HOLEY_ELEMENTS:
return false;
default:
return false;
}
}
} } // namespace v8::internal
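The deleted elements-kind.cc above drives kind generalization off a fixed sequence (SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT). A short worked example of what GetNextMoreGeneralFastElementsKind returns under that sequence (illustrative only):

ElementsKind kind = GetNextMoreGeneralFastElementsKind(FAST_SMI_ELEMENTS, false);
// kind == FAST_HOLEY_SMI_ELEMENTS: holey kinds are acceptable because
// allow_only_packed is false.

kind = GetNextMoreGeneralFastElementsKind(FAST_DOUBLE_ELEMENTS, true);
// kind == FAST_ELEMENTS: FAST_HOLEY_DOUBLE_ELEMENTS is skipped because
// allow_only_packed is true, so the walk continues to the next packed kind.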

210
deps/v8/src/elements-kind.h

@ -1,210 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
#include "v8checks.h"
namespace v8 {
namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
// efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
// together at once.
FAST_ELEMENTS,
FAST_HOLEY_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
FAST_DOUBLE_ELEMENTS,
FAST_HOLEY_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
EXTERNAL_PIXEL_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastSmiElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS;
}
inline bool IsFastObjectElementsKind(ElementsKind kind) {
return kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastHoleyElementsKind(ElementsKind kind) {
return kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsHoleyElementsKind(ElementsKind kind) {
return IsFastHoleyElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS;
}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_ELEMENTS;
}
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
return FAST_SMI_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
return FAST_DOUBLE_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_ELEMENTS) {
return FAST_ELEMENTS;
}
return holey_kind;
}
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
if (packed_kind == FAST_SMI_ELEMENTS) {
return FAST_HOLEY_SMI_ELEMENTS;
}
if (packed_kind == FAST_DOUBLE_ELEMENTS) {
return FAST_HOLEY_DOUBLE_ELEMENTS;
}
if (packed_kind == FAST_ELEMENTS) {
return FAST_HOLEY_ELEMENTS;
}
return packed_kind;
}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
ASSERT(IsFastSmiElementsKind(from_kind));
return (from_kind == FAST_SMI_ELEMENTS)
? FAST_ELEMENTS
: FAST_HOLEY_ELEMENTS;
}
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
(IsFastSmiElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind));
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed);
inline bool CanTransitionToMoreGeneralFastElementsKind(
ElementsKind elements_kind,
bool allow_only_packed) {
return IsFastElementsKind(elements_kind) &&
(elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
(!allow_only_packed || elements_kind != FAST_ELEMENTS));
}
} } // namespace v8::internal
#endif // V8_ELEMENTS_KIND_H_

400
deps/v8/src/elements.cc

@ -39,14 +39,8 @@
// Inheritance hierarchy:
// - ElementsAccessorBase (abstract)
// - FastElementsAccessor (abstract)
// - FastSmiOrObjectElementsAccessor
// - FastPackedSmiElementsAccessor
// - FastHoleySmiElementsAccessor
// - FastPackedObjectElementsAccessor
// - FastHoleyObjectElementsAccessor
// - FastObjectElementsAccessor
// - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
// - ExternalElementsAccessor (abstract)
// - ExternalByteElementsAccessor
// - ExternalUnsignedByteElementsAccessor
@ -71,15 +65,9 @@ namespace internal {
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
FixedArray) \
V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
@ -151,6 +139,8 @@ void CopyObjectToObjectElements(FixedArray* from,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@ -158,7 +148,7 @@ void CopyObjectToObjectElements(FixedArray* from,
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@ -170,15 +160,12 @@ void CopyObjectToObjectElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
@ -203,7 +190,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
// Fast object arrays cannot be uninitialized. Ensure they are already
// FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@ -213,7 +200,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
#endif
}
ASSERT(to != from);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == 0) return;
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
@ -229,7 +216,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
to->set_the_hole(i + to_start);
}
}
if (IsFastObjectElementsKind(to_kind)) {
if (to_kind == FAST_ELEMENTS) {
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
@ -247,7 +234,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@ -255,7 +242,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// FAST_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@ -268,14 +255,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (IsFastSmiElementsKind(to_kind)) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
ASSERT(IsFastObjectElementsKind(to_kind));
// Because Double -> Object elements transitions allocate HeapObjects
ASSERT(to_kind == FAST_ELEMENTS);
// Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS transitions allocate HeapObjects
// iteratively, the allocation must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they
@ -417,38 +404,6 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual ElementsKind kind() const { return ElementsTraits::Kind; }
static void ValidateContents(JSObject* holder, int length) {
}
static void ValidateImpl(JSObject* holder) {
FixedArrayBase* fixed_array_base = holder->elements();
// When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
if (map == heap->raw_unchecked_one_pointer_filler_map() ||
map == heap->raw_unchecked_two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
if (length_obj->IsSmi()) {
length = Smi::cast(length_obj)->value();
}
} else {
length = fixed_array_base->length();
}
ElementsAccessorSubclass::ValidateContents(holder, length);
}
virtual void Validate(JSObject* holder) {
ElementsAccessorSubclass::ValidateImpl(holder);
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
@ -469,7 +424,7 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store));
}
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
virtual MaybeObject* Get(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
@ -480,7 +435,7 @@ class ElementsAccessorBase : public ElementsAccessor {
receiver, holder, key, BackingStore::cast(backing_store));
}
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
BackingStore* backing_store) {
@ -489,19 +444,17 @@ class ElementsAccessorBase : public ElementsAccessor {
: backing_store->GetHeap()->the_hole_value();
}
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
array, length, BackingStore::cast(array->elements()));
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
static MaybeObject* SetLengthImpl(JSObject* obj,
Object* length,
BackingStore* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
@ -510,19 +463,18 @@ class ElementsAccessorBase : public ElementsAccessor {
length);
}
MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
JSObject* obj,
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
int capacity,
int length) {
UNIMPLEMENTED();
return obj;
}
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
@ -532,7 +484,7 @@ class ElementsAccessorBase : public ElementsAccessor {
return NULL;
}
MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
virtual MaybeObject* CopyElements(JSObject* from_holder,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
@ -549,8 +501,7 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, to, to_kind, to_start, copy_size);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
Object* receiver,
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
JSObject* holder,
FixedArray* to,
FixedArrayBase* from) {
@ -669,7 +620,6 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
friend class NonStrictArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
@ -680,21 +630,10 @@ class FastElementsAccessor
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
bool same_size = old_length->IsSmi() &&
static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
ElementsKind kind = array->GetElementsKind();
if (!same_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
if (array->HasFastSmiOrObjectElements()) {
if (array->HasFastTypeElements()) {
MaybeObject* maybe_obj = array->EnsureWritableFastElements();
if (!maybe_obj->To(&backing_store)) return maybe_obj;
}
@ -726,40 +665,39 @@ class FastElementsAccessor
MaybeObject* result = FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
if (result->IsFailure()) return result;
array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
return array->GetHeap()->undefined_value();
}
};
class FastObjectElementsAccessor
: public FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize> {
public:
explicit FastObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
uint32_t key) {
ASSERT(obj->HasFastElements() ||
obj->HasFastSmiOnlyElements() ||
obj->HasFastArgumentsElements());
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
backing_store =
KindTraits::BackingStore::cast(
FixedArray::cast(backing_store)->get(1));
backing_store = FixedArray::cast(backing_store->get(1));
} else {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
MaybeObject* transitioned =
obj->TransitionElementsKind(GetHoleyElementsKind(kind));
if (transitioned->IsFailure()) return transitioned;
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
Object* writable;
MaybeObject* maybe = obj->EnsureWritableFastElements();
if (!maybe->ToObject(&writable)) return maybe;
backing_store = KindTraits::BackingStore::cast(writable);
}
backing_store = FixedArray::cast(writable);
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
@ -771,14 +709,15 @@ class FastElementsAccessor
// has too few used values, normalize it.
// To avoid doing the check on every delete we require at least
// one adjacent hole to the value being deleted.
Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) &&
((key > 0 && backing_store->is_the_hole(key - 1)) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) {
((key > 0 && backing_store->get(key - 1) == hole) ||
(key + 1 < length && backing_store->get(key + 1) == hole))) {
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
if (!backing_store->is_the_hole(i)) ++num_used;
if (backing_store->get(i) != hole) ++num_used;
// Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break;
}
@ -791,74 +730,26 @@ class FastElementsAccessor
return heap->true_value();
}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
static bool HasElementImpl(
Object* receiver,
JSObject* holder,
uint32_t key,
typename KindTraits::BackingStore* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
return !backing_store->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
#if DEBUG
FixedArrayBase* elements = holder->elements();
Heap* heap = elements->GetHeap();
Map* map = elements->map();
ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
(map == heap->fixed_array_map() ||
map == heap->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(KindTraits::Kind) ==
((map == heap->fixed_array_map() && length == 0) ||
map == heap->fixed_double_array_map())));
for (int i = 0; i < length; i++) {
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
backing_store->is_the_hole(i)));
}
#endif
}
};
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize> {
public:
explicit FastSmiOrObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize>(name) {}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
if (IsFastSmiOrObjectElementsKind(to_kind)) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
CopyObjectToObjectElements(
FixedArray::cast(from), KindTraits::Kind, from_start,
FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
} else if (IsFastDoubleElementsKind(to_kind)) {
return from;
}
case FAST_DOUBLE_ELEMENTS:
CopyObjectToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
} else {
return from;
default:
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
@ -868,85 +759,51 @@ class FastSmiOrObjectElementsAccessor
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
obj->HasFastSmiElements()
? JSObject::kAllowSmiElements
: JSObject::kDontAllowSmiElements;
JSObject::SetFastElementsCapacityMode set_capacity_mode =
obj->HasFastSmiOnlyElements()
? JSObject::kAllowSmiOnlyElements
: JSObject::kDontAllowSmiOnlyElements;
return obj->SetFastElementsCapacityAndLength(capacity,
length,
set_capacity_mode);
}
};
class FastPackedSmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> > {
public:
explicit FastPackedSmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
};
class FastHoleySmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
public:
explicit FastHoleySmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
};
class FastPackedObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> > {
public:
explicit FastPackedObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> >(name) {}
};
protected:
friend class FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>;
class FastHoleyObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
public:
explicit FastHoleyObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key);
}
};
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastDoubleElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
: public FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize> {
public:
explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
: FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
return obj->SetFastDoubleElementsCapacityAndLength(capacity,
length);
return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
}
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
friend class FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>;
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
@ -954,15 +811,12 @@ class FastDoubleElementsAccessor
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
@ -972,35 +826,26 @@ class FastDoubleElementsAccessor
}
return to->GetHeap()->undefined_value();
}
};
class FastPackedDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
explicit FastPackedDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
};
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
int length = obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: FixedDoubleArray::cast(obj->elements())->length();
if (key < static_cast<uint32_t>(length)) {
FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
}
return obj->GetHeap()->true_value();
}
class FastHoleyDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
explicit FastHoleyDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedDoubleArray* backing_store) {
return key < static_cast<uint32_t>(backing_store->length()) &&
!backing_store->is_the_hole(key);
}
};
@ -1021,7 +866,7 @@ class ExternalElementsAccessor
friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
ElementsKindTraits<Kind> >;
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
BackingStore* backing_store) {
@ -1031,8 +876,7 @@ class ExternalElementsAccessor
: backing_store->GetHeap()->undefined_value();
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
static MaybeObject* SetLengthImpl(JSObject* obj,
Object* length,
BackingStore* backing_store) {
// External arrays do not support changing their length.
@ -1040,7 +884,7 @@ class ExternalElementsAccessor
return obj;
}
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
// External arrays always ignore deletes.
@ -1158,8 +1002,7 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
SeededNumberDictionary* dict,
static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
JSArray* array,
Object* length_object,
uint32_t length) {
@ -1214,8 +1057,7 @@ class DictionaryElementsAccessor
return length_object;
}
MUST_USE_RESULT static MaybeObject* DeleteCommon(
JSObject* obj,
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
Isolate* isolate = obj->GetIsolate();
@ -1260,23 +1102,20 @@ class DictionaryElementsAccessor
return heap->true_value();
}
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
@ -1292,14 +1131,13 @@ class DictionaryElementsAccessor
friend class ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
MUST_USE_RESULT static MaybeObject* GetImpl(
Object* receiver,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
SeededNumberDictionary* backing_store) {
@ -1348,7 +1186,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
NonStrictArgumentsElementsAccessor,
ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
FixedArray* parameter_map) {
@ -1378,8 +1216,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
static MaybeObject* SetLengthImpl(JSObject* obj,
Object* length,
FixedArray* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
@ -1388,7 +1225,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
return obj;
}
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
@ -1403,16 +1240,13 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
// It's difficult to access the version of DeleteCommon that is declared
// in the templatized super class, so call the concrete implementation in
// the class for the most generalized ElementsKind subclass.
return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
return FastObjectElementsAccessor::DeleteCommon(obj, key);
}
}
return obj->GetHeap()->true_value();
}
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
@ -1470,7 +1304,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
if (array->IsDictionary()) {
return elements_accessors_[DICTIONARY_ELEMENTS];
} else {
return elements_accessors_[FAST_HOLEY_ELEMENTS];
return elements_accessors_[FAST_ELEMENTS];
}
case EXTERNAL_BYTE_ARRAY_TYPE:
return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
@ -1520,7 +1354,7 @@ void ElementsAccessor::TearDown() {
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,

22
deps/v8/src/elements.h

@ -28,7 +28,6 @@
#ifndef V8_ELEMENTS_H_
#define V8_ELEMENTS_H_
#include "elements-kind.h"
#include "objects.h"
#include "heap.h"
#include "isolate.h"
@ -46,10 +45,6 @@ class ElementsAccessor {
virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(JSObject* obj) = 0;
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
@ -65,8 +60,7 @@ class ElementsAccessor {
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual MaybeObject* Get(
Object* receiver,
virtual MaybeObject* Get(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
@ -76,7 +70,7 @@ class ElementsAccessor {
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. arrays that
// have non-deletable elements can only be shrunk to the size of the highest
// element that is non-deletable.
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
virtual MaybeObject* SetLength(JSArray* holder,
Object* new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
@ -85,12 +79,12 @@ class ElementsAccessor {
// elements. This method should only be called for array expansion OR by
// runtime JavaScript code that uses InternalArrays and doesn't care about
// EcmaScript 5.1 semantics.
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
@ -107,8 +101,7 @@ class ElementsAccessor {
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
// ignored.
MUST_USE_RESULT virtual MaybeObject* CopyElements(
JSObject* source_holder,
virtual MaybeObject* CopyElements(JSObject* source_holder,
uint32_t source_start,
FixedArrayBase* destination,
ElementsKind destination_kind,
@ -116,7 +109,7 @@ class ElementsAccessor {
int copy_size,
FixedArrayBase* source = NULL) = 0;
MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
ElementsKind to_kind,
FixedArrayBase* from = NULL) {
@ -124,8 +117,7 @@ class ElementsAccessor {
kCopyToEndAndInitializeToHole, from);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
Object* receiver,
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
JSObject* holder,
FixedArray* to,
FixedArrayBase* from = NULL) = 0;
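
Note: the ElementsAccessor interface above (elements.h) is dispatched through a per-kind table built from ELEMENTS_LIST in elements.cc, where the comment stresses that the table order must match the ElementsKind enum. The following is a hedged, self-contained sketch of that dispatch pattern; class names, the reduced enum, and the setup function are illustrative stand-ins, not the real V8 declarations.

// Toy illustration of one-accessor-per-ElementsKind dispatch via an array
// indexed by the enum, mirroring the accessor_array_ idea in elements.cc.
#include <cassert>
#include <cstdio>
enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, DICTIONARY_ELEMENTS, KIND_COUNT };
class ElementsAccessor {
 public:
  explicit ElementsAccessor(const char* name) : name_(name) {}
  virtual ~ElementsAccessor() {}
  virtual ElementsKind kind() const = 0;
  const char* name() const { return name_; }
  static ElementsAccessor* ForKind(ElementsKind kind) {
    assert(kind < KIND_COUNT);
    return accessor_array_[kind];  // table order must match the enum order
  }
  static void InitializeOncePerProcess();
 private:
  const char* name_;
  static ElementsAccessor* accessor_array_[KIND_COUNT];
};
template <ElementsKind Kind>
class TypedAccessor : public ElementsAccessor {
 public:
  explicit TypedAccessor(const char* name) : ElementsAccessor(name) {}
  virtual ElementsKind kind() const { return Kind; }
};
ElementsAccessor* ElementsAccessor::accessor_array_[KIND_COUNT];
void ElementsAccessor::InitializeOncePerProcess() {
  // Filled in enum order, the same constraint the ELEMENTS_LIST macro encodes.
  accessor_array_[FAST_SMI_ONLY_ELEMENTS] = new TypedAccessor<FAST_SMI_ONLY_ELEMENTS>("smi");
  accessor_array_[FAST_ELEMENTS]          = new TypedAccessor<FAST_ELEMENTS>("object");
  accessor_array_[DICTIONARY_ELEMENTS]    = new TypedAccessor<DICTIONARY_ELEMENTS>("dictionary");
}
int main() {
  ElementsAccessor::InitializeOncePerProcess();
  std::printf("%s\n", ElementsAccessor::ForKind(FAST_ELEMENTS)->name());  // prints "object"
  return 0;
}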

5
deps/v8/src/factory.cc

@ -775,7 +775,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
instance_size != JSObject::kHeaderSize) {
Handle<Map> initial_map = NewMap(type,
instance_size,
GetInitialFastElementsKind());
FAST_SMI_ONLY_ELEMENTS);
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@ -1013,11 +1013,10 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainElements(*elements, length, mode));
array->EnsureCanContainElements(*elements, mode));
}

13
deps/v8/src/factory.h

@ -216,10 +216,9 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
Handle<Map> NewMap(
InstanceType type,
Handle<Map> NewMap(InstanceType type,
int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
ElementsKind elements_kind = FAST_ELEMENTS);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@ -270,14 +269,13 @@ class Factory {
Handle<JSModule> NewJSModule();
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(
int capacity,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
Handle<JSArray> NewJSArray(int capacity,
ElementsKind elements_kind = FAST_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
ElementsKind elements_kind = FAST_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
void SetElementsCapacityAndLength(Handle<JSArray> array,
@ -289,7 +287,6 @@ class Factory {
void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);

3
deps/v8/src/flag-definitions.h

@ -150,7 +150,6 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@ -198,8 +197,6 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, false,
"perform array index dehoisting")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")

3
deps/v8/src/frames.h

@ -211,9 +211,6 @@ class StackFrame BASE_EMBEDDED {
virtual void SetCallerFp(Address caller_fp) = 0;
// Manually changes value of fp in this object.
void UpdateFp(Address fp) { state_.fp = fp; }
Address* pc_address() const { return state_.pc_address; }
// Get the id of this stack frame.

2
deps/v8/src/func-name-inferrer.h

@ -88,8 +88,6 @@ class FuncNameInferrer : public ZoneObject {
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
if (entries_stack_.is_empty())
funcs_to_infer_.Clear();
}
private:

3
deps/v8/src/globals.h

@ -345,9 +345,6 @@ F FUNCTION_CAST(Address addr) {
#define INLINE(header) inline __attribute__((always_inline)) header
#define NO_INLINE(header) __attribute__((noinline)) header
#endif
#elif defined(_MSC_VER) && !defined(DEBUG)
#define INLINE(header) __forceinline header
#define NO_INLINE(header) header
#else
#define INLINE(header) inline header
#define NO_INLINE(header) header
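
Note: for readers unfamiliar with the INLINE(header)/NO_INLINE(header) convention in globals.h above, a small usage sketch. Only the portable fallback definitions are used here so the snippet stands alone; the attribute-bearing branches belong to the real header.

// Standalone illustration: the macro wraps an entire declaration "header" so
// each toolchain can prepend its own inlining attribute.
#include <cstdio>
#define INLINE(header) inline header
#define NO_INLINE(header) header
INLINE(int Square(int x)) { return x * x; }       // expands to: inline int Square(int x)
NO_INLINE(int Cube(int x)) { return x * x * x; }  // expands to: int Cube(int x)
int main() {
  std::printf("%d %d\n", Square(3), Cube(3));
  return 0;
}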

20
deps/v8/src/heap-inl.h

@ -595,24 +595,12 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
ASSERT(heap_->InNewSpace(new_space_strings_[i]));
ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
}
#endif
}

70
deps/v8/src/heap.cc

@ -171,9 +171,6 @@ Heap::Heap()
global_contexts_list_ = NULL;
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
}
@ -808,7 +805,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
if (high_survival_rate_during_scavenges &&
IsStableOrIncreasingSurvivalTrend()) {
@ -2023,7 +2020,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_pre_allocated_property_fields(0);
map->init_instance_descriptors();
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->init_prototype_transitions(undefined_value());
map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
@ -2162,15 +2159,15 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->init_instance_descriptors();
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->init_prototype_transitions(undefined_value());
meta_map()->set_prototype_transitions(empty_fixed_array());
fixed_array_map()->init_instance_descriptors();
fixed_array_map()->set_code_cache(empty_fixed_array());
fixed_array_map()->init_prototype_transitions(undefined_value());
fixed_array_map()->set_prototype_transitions(empty_fixed_array());
oddball_map()->init_instance_descriptors();
oddball_map()->set_code_cache(empty_fixed_array());
oddball_map()->init_prototype_transitions(undefined_value());
oddball_map()->set_prototype_transitions(empty_fixed_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@ -2469,7 +2466,7 @@ bool Heap::CreateApiObjects() {
// bottleneck to trap the Smi-only -> fast elements transition, and there
// appears to be no benefit in optimizing this case.
Map* new_neander_map = Map::cast(obj);
new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
new_neander_map->set_elements_kind(FAST_ELEMENTS);
set_neander_map(new_neander_map);
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@ -3050,7 +3047,6 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
}
JSMessageObject* message = JSMessageObject::cast(result);
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->initialize_elements();
message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->set_type(type);
message->set_arguments(arguments);
@ -3327,8 +3323,6 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
return Failure::OutOfMemoryException();
}
ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
Map* map = external_ascii_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@ -3754,7 +3748,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties());
ASSERT(JSObject::cast(result)->HasFastObjectElements());
ASSERT(JSObject::cast(result)->HasFastElements());
return result;
}
@ -3799,7 +3793,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
ASSERT(map->has_fast_object_elements());
ASSERT(map->has_fast_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@ -3916,7 +3910,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
JSObject::cast(obj)->HasFastElements());
return obj;
}
@ -3961,9 +3956,6 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@ -3984,7 +3976,8 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
}
} else {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
ASSERT(elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_SMI_ONLY_ELEMENTS);
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedArray(capacity);
} else {
@ -4010,7 +4003,6 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
array->set_elements(elements);
array->set_length(Smi::FromInt(elements->length()));
array->ValidateElements();
return array;
}
@ -4495,16 +4487,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
#ifdef DEBUG
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
char* dest = SeqAsciiString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif // DEBUG
return result;
}
@ -4551,13 +4533,13 @@ MaybeObject* Heap::AllocateJSArray(
Context* global_context = isolate()->context()->global_context();
JSFunction* array_function = global_context->array_function();
Map* map = array_function->initial_map();
Object* maybe_map_array = global_context->js_array_maps();
if (!maybe_map_array->IsUndefined()) {
Object* maybe_transitioned_map =
FixedArray::cast(maybe_map_array)->get(elements_kind);
if (!maybe_transitioned_map->IsUndefined()) {
map = Map::cast(maybe_transitioned_map);
}
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
map = Map::cast(global_context->double_js_array_map());
} else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
map = Map::cast(global_context->object_js_array_map());
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(map == global_context->smi_js_array_map());
}
return AllocateJSObjectFromMap(map, pretenure);
@ -4842,7 +4824,9 @@ MaybeObject* Heap::AllocateGlobalContext() {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(global_context_map());
context->set_js_array_maps(undefined_value());
context->set_smi_js_array_map(undefined_value());
context->set_double_js_array_map(undefined_value());
context->set_object_js_array_map(undefined_value());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
@ -5826,6 +5810,16 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
intptr_t Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
+ code_space_->Size()
+ map_space_->Size()
+ cell_space_->Size()
+ lo_space_->Size();
}
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()

20
deps/v8/src/heap.h

@ -621,7 +621,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
ElementsKind elements_kind = FAST_ELEMENTS);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@ -1342,7 +1342,7 @@ class Heap {
PretenureFlag pretenure);
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
return PromotedSpaceSize() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
@ -1363,6 +1363,19 @@ class Heap {
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
// When we sweep lazily we initially guess that there is no garbage on the
// heap and set the limits for the next GC accordingly. As we sweep we find
// out that some of the pages contained garbage and we have to adjust
// downwards the size of the heap. This means the limits that control the
// timing of the next GC also need to be adjusted downwards.
void LowerOldGenLimits(intptr_t adjustment) {
size_of_old_gen_at_last_old_space_gc_ -= adjustment;
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
}
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 : 3;
intptr_t limit =
@ -1455,7 +1468,7 @@ class Heap {
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
return false;
}
@ -1493,6 +1506,7 @@ class Heap {
GCTracer* tracer() { return tracer_; }
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
intptr_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
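
Note: a small arithmetic sketch of the old-generation limit bookkeeping touched in heap.h above (size recorded at the last old-space GC, promotion and allocation limits derived from it, and LowerOldGenLimits shrinking all three together when lazy sweeping finds garbage). The struct name and the multipliers are assumptions for illustration; only the shape of the calculation follows the header.

// Toy model of the old-generation GC limits kept by Heap.
#include <cstdint>
#include <cstdio>
struct OldGenLimits {
  int64_t size_at_last_gc;
  int64_t promotion_limit;
  int64_t allocation_limit;
  // Illustrative growth factors; V8 derives its limits from flags and page sizes.
  static int64_t PromotionLimit(int64_t old_gen_size) { return old_gen_size * 3; }
  static int64_t AllocationLimit(int64_t old_gen_size) { return old_gen_size * 2; }
  void RecordAfterOldSpaceGC(int64_t old_gen_size) {
    size_at_last_gc = old_gen_size;
    promotion_limit = PromotionLimit(old_gen_size);
    allocation_limit = AllocationLimit(old_gen_size);
  }
  // Mirrors the intent of LowerOldGenLimits: sweeping discovered `adjustment`
  // bytes of garbage, so shrink the baseline and recompute both limits.
  void Lower(int64_t adjustment) {
    size_at_last_gc -= adjustment;
    promotion_limit = PromotionLimit(size_at_last_gc);
    allocation_limit = AllocationLimit(size_at_last_gc);
  }
};
int main() {
  OldGenLimits limits;
  limits.RecordAfterOldSpaceGC(100 << 20);  // 100 MB live after the last old-space GC
  limits.Lower(10 << 20);                   // lazy sweeping found 10 MB of garbage
  std::printf("new allocation limit: %lld MB\n",
              static_cast<long long>(limits.allocation_limit >> 20));
  return 0;
}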

33
deps/v8/src/hydrogen-instructions.cc

@ -1603,7 +1603,6 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
SetOperandAt(1, object);
set_representation(Representation::Tagged());
SetGVNFlag(kDependsOnMaps);
int map_transitions = 0;
for (int i = 0;
i < types->length() && types_.length() < kMaxLoadPolymorphism;
++i) {
@ -1625,20 +1624,13 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
case CONSTANT_FUNCTION:
types_.Add(types->at(i));
break;
case MAP_TRANSITION:
// We should just ignore these since they are not relevant to a load
// operation. This means we will deopt if we actually see this map
// from optimized code.
map_transitions++;
break;
default:
break;
}
}
}
if (types_.length() + map_transitions == types->length() &&
FLAG_deoptimize_uncommon_cases) {
if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
SetFlag(kUseGVN);
} else {
SetAllSideEffects();
@ -1685,9 +1677,6 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
if (hole_check_mode_ == PERFORM_HOLE_CHECK) {
stream->Add(" check_hole");
}
}
@ -1739,7 +1728,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
OMIT_HOLE_CHECK);
HLoadKeyedFastElement::OMIT_HOLE_CHECK);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
object(), index);
map_check->InsertBefore(this);
@ -1787,11 +1776,8 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -1888,12 +1874,9 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -1908,13 +1891,7 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
ElementsKind from_kind = original_map()->elements_kind();
ElementsKind to_kind = transitioned_map()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
*original_map(),
ElementsAccessor::ForKind(from_kind)->name(),
*transitioned_map(),
ElementsAccessor::ForKind(to_kind)->name());
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}

164
deps/v8/src/hydrogen-instructions.h

@ -2083,21 +2083,28 @@ class HCheckMaps: public HTemplateInstruction<2> {
HCheckMaps* check_map = new HCheckMaps(object, map);
SmallMapList* map_set = check_map->map_set();
// Since transitioned elements maps of the initial map don't fail the map
// check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
// If the map to check has the untransitioned elements, it can be hoisted
// above TransitionElements instructions.
if (map->has_fast_smi_only_elements()) {
check_map->ClearGVNFlag(kDependsOnElementsKind);
}
ElementsKind kind = map->elements_kind();
bool packed = IsFastPackedElementsKind(kind);
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
Map* transitioned_map =
map->LookupElementsTransitionMap(kind, NULL);
if (transitioned_map) {
map_set->Add(Handle<Map>(transitioned_map));
Map* transitioned_fast_element_map =
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL);
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
map_set->Add(Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map =
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL);
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
map_set->Add(Handle<Map>(transitioned_double_map));
}
};
map_set->Sort();
return check_map;
}
@ -3939,28 +3946,15 @@ class HLoadFunctionPrototype: public HUnaryOperation {
virtual bool DataEquals(HValue* other) { return true; }
};
class ArrayInstructionInterface {
public:
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
virtual void SetIndexOffset(uint32_t index_offset) = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { };
};
enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
class HLoadKeyedFastElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
class HLoadKeyedFastElement: public HTemplateInstruction<2> {
public:
enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
HLoadKeyedFastElement(HValue* obj,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: hole_check_mode_(hole_check_mode),
index_offset_(0),
is_dehoisted_(false) {
: hole_check_mode_(hole_check_mode) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
set_representation(Representation::Tagged());
@ -3970,12 +3964,6 @@ class HLoadKeyedFastElement
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@ -3994,28 +3982,17 @@ class HLoadKeyedFastElement
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastElement()) return false;
HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
return false;
return hole_check_mode_ == other_load->hole_check_mode_;
}
private:
HoleCheckMode hole_check_mode_;
uint32_t index_offset_;
bool is_dehoisted_;
};
class HLoadKeyedFastDoubleElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
public:
HLoadKeyedFastDoubleElement(
HValue* elements,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: index_offset_(0),
is_dehoisted_(false),
hole_check_mode_(hole_check_mode) {
HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
set_representation(Representation::Double());
@ -4025,12 +4002,6 @@ class HLoadKeyedFastDoubleElement
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@ -4039,38 +4010,21 @@ class HLoadKeyedFastDoubleElement
: Representation::Integer32();
}
bool RequiresHoleCheck() {
return hole_check_mode_ == PERFORM_HOLE_CHECK;
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
protected:
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastDoubleElement()) return false;
HLoadKeyedFastDoubleElement* other_load =
HLoadKeyedFastDoubleElement::cast(other);
return hole_check_mode_ == other_load->hole_check_mode_;
}
private:
uint32_t index_offset_;
bool is_dehoisted_;
HoleCheckMode hole_check_mode_;
virtual bool DataEquals(HValue* other) { return true; }
};
class HLoadKeyedSpecializedArrayElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
ElementsKind elements_kind)
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false) {
: elements_kind_(elements_kind) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@ -4098,12 +4052,6 @@ class HLoadKeyedSpecializedArrayElement
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Range* InferRange(Zone* zone);
@ -4119,8 +4067,6 @@ class HLoadKeyedSpecializedArrayElement
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
@ -4242,12 +4188,11 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
class HStoreKeyedFastElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
class HStoreKeyedFastElement: public HTemplateInstruction<3> {
public:
HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind = FAST_ELEMENTS)
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
: elements_kind_(elements_kind) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@ -4265,14 +4210,8 @@ class HStoreKeyedFastElement
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
bool value_is_smi() {
return IsFastSmiElementsKind(elements_kind_);
return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
}
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
@ -4288,18 +4227,14 @@ class HStoreKeyedFastElement
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
class HStoreKeyedFastDoubleElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
public:
HStoreKeyedFastDoubleElement(HValue* elements,
HValue* key,
HValue* val)
: index_offset_(0), is_dehoisted_(false) {
HValue* val) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
@ -4319,12 +4254,6 @@ class HStoreKeyedFastDoubleElement
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
@ -4335,21 +4264,16 @@ class HStoreKeyedFastDoubleElement
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
private:
uint32_t index_offset_;
bool is_dehoisted_;
};
class HStoreKeyedSpecializedArrayElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
public:
HStoreKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
HValue* val,
ElementsKind elements_kind)
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
: elements_kind_(elements_kind) {
SetGVNFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
@ -4377,19 +4301,11 @@ class HStoreKeyedSpecializedArrayElement
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
@ -4436,19 +4352,9 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
// Don't set GVN DependOn flags here. That would defeat GVN's detection of
// congruent HTransitionElementsKind instructions. Instruction hoisting
// handles HTransitionElementsKind instruction specially, explicitly adding
// DependsOn flags during its dependency calculations.
SetGVNFlag(kChangesElementsKind);
if (original_map->has_fast_double_elements()) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
if (transitioned_map->has_fast_double_elements()) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
@ -4686,7 +4592,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
HValue* context() { return OperandAt(0); }
ElementsKind boilerplate_elements_kind() const {
if (!boilerplate_object_->IsJSObject()) {
return TERMINAL_FAST_ELEMENTS_KIND;
return FAST_ELEMENTS;
}
return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
}

313
deps/v8/src/hydrogen.cc

@ -1709,23 +1709,23 @@ void HGlobalValueNumberer::ProcessLoopBlock(
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
if (instr->IsTransitionElementsKind()) {
// It's possible to hoist transitions out of a loop as long as the
// hoisting wouldn't move the transition past an instruction that has a
// DependsOn flag for anything it changes.
// hoisting wouldn't move the transition past a DependsOn of one of its
// changes or any instructions that might change an object's map or
// elements contents.
GVNFlagSet changes = instr->ChangesFlags();
GVNFlagSet hoist_depends_blockers =
HValue::ConvertChangesToDependsFlags(instr->ChangesFlags());
// In addition, the transition must not be hoisted above elements kind
// changes, or if the transition is destructive to the elements buffer,
// changes to array pointer or array contents.
GVNFlagSet hoist_change_blockers;
hoist_change_blockers.Add(kChangesElementsKind);
HValue::ConvertChangesToDependsFlags(changes);
// In addition to not hoisting transitions above other instructions that
// change dependencies that the transition changes, it must not be
// hoisted above map changes and stores to an elements backing store
// that the transition might change.
GVNFlagSet hoist_change_blockers = changes;
hoist_change_blockers.Add(kChangesMaps);
HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
if (trans->original_map()->has_fast_double_elements()) {
hoist_change_blockers.Add(kChangesElementsPointer);
hoist_change_blockers.Add(kChangesDoubleArrayElements);
}
if (trans->transitioned_map()->has_fast_double_elements()) {
hoist_change_blockers.Add(kChangesElementsPointer);
hoist_change_blockers.Add(kChangesArrayElements);
}
if (FLAG_trace_gvn) {
@ -2758,7 +2758,6 @@ HGraph* HGraphBuilder::CreateGraph() {
sce.Process();
graph()->EliminateRedundantBoundsChecks();
graph()->DehoistSimpleArrayIndexComputations();
return graph();
}
@ -3017,6 +3016,7 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
HBoundsCheck* check = HBoundsCheck::cast(i);
check->ReplaceAllUsesWith(check->index());
isolate()->counters()->array_bounds_checks_seen()->Increment();
if (!FLAG_array_bounds_checks_elimination) continue;
int32_t offset;
@ -3035,8 +3035,10 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
*data_p = bb_data_list;
} else if (data->OffsetIsCovered(offset)) {
check->DeleteAndReplaceWith(NULL);
isolate()->counters()->array_bounds_checks_removed()->Increment();
} else if (data->BasicBlock() == bb) {
data->CoverCheck(check, offset);
isolate()->counters()->array_bounds_checks_removed()->Increment();
} else {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
@ -3080,93 +3082,6 @@ void HGraph::EliminateRedundantBoundsChecks() {
}
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
HValue* index = array_operation->GetKey();
HConstant* constant;
HValue* subexpression;
int32_t sign;
if (index->IsAdd()) {
sign = 1;
HAdd* add = HAdd::cast(index);
if (add->left()->IsConstant()) {
subexpression = add->right();
constant = HConstant::cast(add->left());
} else if (add->right()->IsConstant()) {
subexpression = add->left();
constant = HConstant::cast(add->right());
} else {
return;
}
} else if (index->IsSub()) {
sign = -1;
HSub* sub = HSub::cast(index);
if (sub->left()->IsConstant()) {
subexpression = sub->right();
constant = HConstant::cast(sub->left());
} else if (sub->right()->IsConstant()) {
subexpression = sub->left();
constant = HConstant::cast(sub->right());
} else {
return;
}
} else {
return;
}
if (!constant->HasInteger32Value()) return;
int32_t value = constant->Integer32Value() * sign;
// We limit offset values to 30 bits because we want to avoid the risk of
// overflows when the offset is added to the object header size.
if (value >= 1 << 30 || value < 0) return;
array_operation->SetKey(subexpression);
if (index->HasNoUses()) {
index->DeleteAndReplaceWith(NULL);
}
ASSERT(value >= 0);
array_operation->SetIndexOffset(static_cast<uint32_t>(value));
array_operation->SetDehoisted(true);
}
void HGraph::DehoistSimpleArrayIndexComputations() {
if (!FLAG_array_index_dehoisting) return;
HPhase phase("H_Dehoist index computations", this);
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstruction* instr = blocks()->at(i)->first();
instr != NULL;
instr = instr->next()) {
ArrayInstructionInterface* array_instruction = NULL;
if (instr->IsLoadKeyedFastElement()) {
HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsLoadKeyedFastDoubleElement()) {
HLoadKeyedFastDoubleElement* op =
HLoadKeyedFastDoubleElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsLoadKeyedSpecializedArrayElement()) {
HLoadKeyedSpecializedArrayElement* op =
HLoadKeyedSpecializedArrayElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsStoreKeyedFastElement()) {
HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsStoreKeyedFastDoubleElement()) {
HStoreKeyedFastDoubleElement* op =
HStoreKeyedFastDoubleElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsStoreKeyedSpecializedArrayElement()) {
HStoreKeyedSpecializedArrayElement* op =
HStoreKeyedSpecializedArrayElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else {
continue;
}
DehoistArrayIndex(array_instruction);
}
}
}
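
The pass deleted above strips a constant addend out of keyed-access indices such as a[i + 4], leaving i as the key and recording 4 as index_offset so later code generation can fold it into the addressing mode. A minimal sketch of that split, using plain integers instead of Hydrogen nodes (all names illustrative):

// Illustrative sketch; plain integers stand in for Hydrogen IR nodes.
#include <cstdint>
#include <cstdio>

struct DehoistedKey {
  int32_t dynamic_key;    // what stays in the register-based index
  uint32_t index_offset;  // constant folded into the memory operand
  bool ok;
};

DehoistedKey Dehoist(int32_t subexpression, int32_t constant, bool is_sub) {
  DehoistedKey result = {subexpression, 0, false};
  int64_t value = is_sub ? -static_cast<int64_t>(constant) : constant;
  // Same guard as the pass above: keep offsets under 30 bits so adding the
  // object header size cannot overflow the 32-bit displacement.
  if (value < 0 || value >= (static_cast<int64_t>(1) << 30)) return result;
  result.index_offset = static_cast<uint32_t>(value);
  result.ok = true;
  return result;
}

int main() {
  // a[i + 4]  =>  key i, index_offset 4
  DehoistedKey k = Dehoist(/*subexpression=*/7, /*constant=*/4, /*is_sub=*/false);
  std::printf("offset=%u ok=%d\n", k.index_offset, k.ok);
  return 0;
}
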
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
@ -3966,7 +3881,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
environment()->ExpressionStackAt(0), // Iteration index.
OMIT_HOLE_CHECK));
HLoadKeyedFastElement::OMIT_HOLE_CHECK));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@ -4257,7 +4172,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
} else if (boilerplate->HasFastObjectElements()) {
} else if (boilerplate->HasFastElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@ -4464,13 +4379,11 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Representation::Integer32()));
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(value));
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastElement(
elements,
key,
@ -4478,7 +4391,6 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind));
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
key,
value));
@ -5236,12 +5148,9 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -5266,16 +5175,13 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
ASSERT(val != NULL);
switch (elements_kind) {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
return new(zone()) HStoreKeyedFastDoubleElement(
elements, checked_key, val);
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(val));
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
default:
@ -5284,13 +5190,10 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
}
}
// It's an element load (!is_store).
HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ?
OMIT_HOLE_CHECK :
PERFORM_HOLE_CHECK;
if (IsFastDoubleElementsKind(elements_kind)) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
} else { // Smi or Object elements.
return new(zone()) HLoadKeyedFastElement(elements, checked_key, mode);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
} else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
return new(zone()) HLoadKeyedFastElement(elements, checked_key);
}
}
@ -5298,30 +5201,15 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store) {
HInstruction* mapcheck =
AddInstruction(new(zone()) HCheckMaps(object, map, dependency));
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
// ElementsKind transitions. Finally, the dependency can be removed for stores
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if (dependency ||
(map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
(map->elements_kind() == FAST_ELEMENTS && is_store)) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
bool fast_smi_only_elements = map->has_fast_smi_elements();
bool fast_elements = map->has_fast_object_elements();
HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMaps(object, map));
bool fast_smi_only_elements = map->has_fast_smi_only_elements();
bool fast_elements = map->has_fast_elements();
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) {
HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
AddInstruction(new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map()));
}
HInstruction* length = NULL;
HInstruction* checked_key = NULL;
@ -5374,8 +5262,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ElementsKind elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind) &&
elements_kind != GetInitialFastElementsKind()) {
if (elements_kind == FAST_DOUBLE_ELEMENTS ||
elements_kind == FAST_ELEMENTS) {
possible_transitioned_maps.Add(map);
}
}
@ -5389,17 +5277,12 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
int num_untransitionable_maps = 0;
Handle<Map> untransitionable_map;
HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ASSERT(map->IsMap());
if (!transition_target.at(i).is_null()) {
ASSERT(Map::IsValidElementsTransition(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
transition = new(zone()) HTransitionElementsKind(
object, map, transition_target.at(i));
AddInstruction(transition);
AddInstruction(new(zone()) HTransitionElementsKind(
object, map, transition_target.at(i)));
} else {
type_todo[map->elements_kind()] = true;
if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
@ -5419,7 +5302,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
: BuildLoadKeyedGeneric(object, key));
} else {
instr = AddInstruction(BuildMonomorphicElementAccess(
object, key, val, transition, untransitionable_map, is_store));
object, key, val, untransitionable_map, is_store));
}
*has_side_effects |= instr->HasObservableSideEffects();
instr->set_position(position);
@ -5436,18 +5319,20 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
// Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
// are handled before external arrays.
STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
// Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
// arrays.
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
// After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
// code that's executed for all external array cases.
// After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
// FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
// that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@ -5469,8 +5354,10 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
if (IsFastElementsKind(elements_kind)) {
if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS) {
if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
AddInstruction(new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
@ -5557,7 +5444,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
: BuildLoadKeyedGeneric(obj, key);
} else {
AddInstruction(new(zone()) HCheckNonSmi(obj));
instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
}
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
@ -5775,39 +5662,6 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
}
class FunctionSorter {
public:
FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
FunctionSorter(int index, int ticks, int ast_length, int src_length)
: index_(index),
ticks_(ticks),
ast_length_(ast_length),
src_length_(src_length) { }
int index() const { return index_; }
int ticks() const { return ticks_; }
int ast_length() const { return ast_length_; }
int src_length() const { return src_length_; }
private:
int index_;
int ticks_;
int ast_length_;
int src_length_;
};
static int CompareHotness(void const* a, void const* b) {
FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
int diff = function1->ticks() - function2->ticks();
if (diff != 0) return -diff;
diff = function1->ast_length() - function2->ast_length();
if (diff != 0) return diff;
return function1->src_length() - function2->src_length();
}
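
CompareHotness above orders polymorphic call targets hottest-first (more profiler ticks), breaking ties by smaller AST and then smaller source, so the most promising inlining candidates are tried before the budget runs out. The same ordering with std::sort, on an illustrative Candidate struct rather than the real FunctionSorter:

// Illustrative Candidate struct; the real code sorts FunctionSorter entries.
#include <algorithm>
#include <vector>

struct Candidate {
  int index;       // position in the receiver-type list
  int ticks;       // profiler ticks of the target
  int ast_length;  // AST node count
  int src_length;  // source size
};

void SortByHotness(std::vector<Candidate>* candidates) {
  std::sort(candidates->begin(), candidates->end(),
            [](const Candidate& a, const Candidate& b) {
              if (a.ticks != b.ticks) return a.ticks > b.ticks;  // hotter first
              if (a.ast_length != b.ast_length) return a.ast_length < b.ast_length;
              return a.src_length < b.src_length;                // smaller source first
            });
}
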
void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
HValue* receiver,
SmallMapList* types,
@ -5816,35 +5670,17 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
int argument_count = expr->arguments()->length() + 1; // Includes receiver.
int count = 0;
HBasicBlock* join = NULL;
FunctionSorter order[kMaxCallPolymorphism];
int ordered_functions = 0;
for (int i = 0;
i < types->length() && ordered_functions < kMaxCallPolymorphism;
++i) {
for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
order[ordered_functions++] =
FunctionSorter(i,
expr->target()->shared()->profiler_ticks(),
InliningAstSize(expr->target()),
expr->target()->shared()->SourceSize());
}
}
qsort(reinterpret_cast<void*>(&order[0]),
ordered_functions,
sizeof(order[0]),
&CompareHotness);
for (int fn = 0; fn < ordered_functions; ++fn) {
int i = order[fn].index();
Handle<Map> map = types->at(i);
if (fn == 0) {
if (count == 0) {
// Only needed once.
AddInstruction(new(zone()) HCheckNonSmi(receiver));
join = graph()->CreateBasicBlock();
}
++count;
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
HCompareMap* compare =
@ -5852,15 +5688,10 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
current_block()->Finish(compare);
set_current_block(if_true);
expr->ComputeTarget(map, name);
AddCheckConstantFunction(expr, receiver, map, false);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
Handle<JSFunction> caller = info()->closure();
SmartArrayPointer<char> caller_name =
caller->shared()->DebugName()->ToCString();
PrintF("Trying to inline the polymorphic call to %s from %s\n",
*name->ToCString(),
*caller_name);
PrintF("Trying to inline the polymorphic call to %s\n",
*name->ToCString());
}
if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
// Trying to inline will signal that we should bailout from the
@ -5878,11 +5709,12 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
if (current_block() != NULL) current_block()->Goto(join);
set_current_block(if_false);
}
}
// Finish up. Unconditionally deoptimize if we've handled all the maps we
// know about and do not want to handle ones we've never seen. Otherwise
// use a generic IC.
if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
} else {
HValue* context = environment()->LookupContext();
@ -5931,11 +5763,14 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
}
static const int kNotInlinable = 1000000000;
int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (!FLAG_use_inlining) return kNotInlinable;
bool HGraphBuilder::TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
HValue* receiver,
int ast_id,
int return_id,
ReturnHandlingFlag return_handling) {
if (!FLAG_use_inlining) return false;
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
@ -5947,43 +5782,25 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (target_shared->SourceSize() >
Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
TraceInline(target, caller, "target text too big");
return kNotInlinable;
return false;
}
// Target must be inlineable.
if (!target->IsInlineable()) {
TraceInline(target, caller, "target not inlineable");
return kNotInlinable;
return false;
}
if (target_shared->dont_inline() || target_shared->dont_optimize()) {
TraceInline(target, caller, "target contains unsupported syntax [early]");
return kNotInlinable;
return false;
}
int nodes_added = target_shared->ast_node_count();
return nodes_added;
}
bool HGraphBuilder::TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
HValue* receiver,
int ast_id,
int return_id,
ReturnHandlingFlag return_handling) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
Handle<JSFunction> caller = info()->closure();
if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
TraceInline(target, caller, "target AST is too large [early]");
return false;
}
Handle<SharedFunctionInfo> target_shared(target->shared());
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
CompilationInfo* outer_info = info();
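
The refactoring removed here splits the inlining gate in two: InliningAstSize answers either the target's AST node count or the kNotInlinable sentinel, and TryInline (as well as the polymorphic-call path above) checks that answer against the node budget. A compact sketch of that shape, with made-up limits standing in for the real flags:

// Limits below are made up; the real values come from --max-inlined-* flags.
constexpr int kNotInlinable = 1000000000;
constexpr int kMaxInlinedSourceSize = 600;
constexpr int kMaxInlinedNodes = 196;

struct TargetInfo {
  bool inlining_enabled;
  bool is_inlineable;
  bool has_unsupported_syntax;
  int source_size;
  int ast_node_count;
};

// Either the AST node count or the kNotInlinable sentinel.
int InliningAstSize(const TargetInfo& target) {
  if (!target.inlining_enabled) return kNotInlinable;
  if (target.source_size > kMaxInlinedSourceSize) return kNotInlinable;
  if (!target.is_inlineable) return kNotInlinable;
  if (target.has_unsupported_syntax) return kNotInlinable;
  return target.ast_node_count;
}

bool TryInline(const TargetInfo& target) {
  int nodes_added = InliningAstSize(target);
  if (nodes_added == kNotInlinable) return false;
  // The real TryInline continues with depth/recursion checks and graph
  // construction; the early size gate is the part shared with the sorter.
  return nodes_added <= kMaxInlinedNodes;
}
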

3
deps/v8/src/hydrogen.h

@ -267,7 +267,6 @@ class HGraph: public ZoneObject {
void AssignDominators();
void ReplaceCheckedValues();
void EliminateRedundantBoundsChecks();
void DehoistSimpleArrayIndexComputations();
void PropagateDeoptimizingMark();
// Returns false if there are phi-uses of the arguments-object
@ -1011,7 +1010,6 @@ class HGraphBuilder: public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
@ -1093,7 +1091,6 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store);
HValue* HandlePolymorphicElementAccess(HValue* object,

3
deps/v8/src/ia32/assembler-ia32.h

@ -640,9 +640,6 @@ class Assembler : public AssemblerBase {
static const byte kJccShortPrefix = 0x70;
static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
static const byte kJcShortOpcode = kJccShortPrefix | carry;
static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
static const byte kJzShortOpcode = kJccShortPrefix | zero;
// ---------------------------------------------------------------------------
// Code generation

9
deps/v8/src/ia32/builtins-ia32.cc

@ -900,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -1003,8 +1003,7 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
__ LoadInitialArrayMap(array_function, scratch,
elements_array, fill_with_hole);
__ LoadInitialArrayMap(array_function, scratch, elements_array);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
@ -1275,11 +1274,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&prepare_generic_code_call);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, Operand(esp, 0));
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(
FAST_SMI_ELEMENTS,
FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
edi,
eax,

38
deps/v8/src/ia32/code-stubs-ia32.cc

@ -3822,24 +3822,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
static const int kRegExpExecuteArguments = 9;
static const int kRegExpExecuteArguments = 8;
__ EnterApiExitFrame(kRegExpExecuteArguments);
// Argument 9: Pass current isolate address.
__ mov(Operand(esp, 8 * kPointerSize),
// Argument 8: Pass current isolate address.
__ mov(Operand(esp, 7 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
// Argument 6: Start (high end) of backtracking stack memory area.
__ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ mov(Operand(esp, 6 * kPointerSize), esi);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
__ mov(Operand(esp, 5 * kPointerSize), esi);
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
@ -3902,9 +3898,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(eax, 1);
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
__ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
@ -7063,8 +7057,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
{ REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@ -7336,9 +7330,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(edi, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(eax, &smi_element);
__ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
__ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
// Store into the array literal requires a elements transition. Call into
// the runtime.
@ -7360,7 +7354,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ pop(edx);
__ jmp(&slow_elements);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@ -7373,15 +7367,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
FixedArrayBase::kHeaderSize), eax);
__ ret(0);
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ push(edx);

4
deps/v8/src/ia32/codegen-ia32.cc

@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value

31
deps/v8/src/ia32/debug-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -91,12 +91,10 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
}
// All debug break stubs support padding for LiveEdit.
const bool Debug::FramePaddingLayout::kIsSupported = true;
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
@ -105,13 +103,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
__ push(Immediate(Smi::FromInt(
Debug::FramePaddingLayout::kPaddingValue)));
}
__ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
// Store the registers containing live values on the expression stack to
// make sure that these are correctly updated during GC. Non object values
// are stored as a smi causing it to be untouched by GC.
@ -143,10 +134,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
CEntryStub ceb(1);
__ CallStub(&ceb);
// Automatically find register that could be used after register restore.
// We need one register for padding skip instructions.
Register unused_reg = { -1 };
// Restore the register values containing object pointers from the
// expression stack.
for (int i = kNumJSCallerSaved; --i >= 0;) {
@ -155,29 +142,15 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
if (FLAG_debug_code) {
__ Set(reg, Immediate(kDebugZapValue));
}
bool taken = reg.code() == esi.code();
if ((object_regs & (1 << r)) != 0) {
__ pop(reg);
taken = true;
}
if ((non_object_regs & (1 << r)) != 0) {
__ pop(reg);
__ SmiUntag(reg);
taken = true;
}
if (!taken) {
unused_reg = reg;
}
}
ASSERT(unused_reg.code() != -1);
// Read current padding counter and skip corresponding number of words.
__ pop(unused_reg);
// We divide stored value by 2 (untagging) and multiply it by word's size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
__ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
// Get rid of the internal frame.
}
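
The padding logic removed above leans on 32-bit smi tagging: raw register values pushed on the stack are shifted left by one so their low bit is clear, the GC then treats them as small integers rather than pointers, and the padding count popped into unused_reg is itself a smi, which is why the esp adjustment scales it by half a pointer size. A small sketch of that arithmetic; the host pointer size stands in for the ia32 word.

// Tag layout matches the STATIC_ASSERT above (kSmiTagSize == 1, kSmiShiftSize == 0).
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr int kSmiTagSize = 1;  // low bit clear => smi, so the GC skips it

inline intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
inline intptr_t SmiUntag(intptr_t value) { return value >> kSmiTagSize; }

int main() {
  const std::size_t kWordSize = sizeof(void*);
  intptr_t padding_words = 5;
  intptr_t tagged = SmiTag(padding_words);  // what the prologue pushed
  // The removed epilogue pops the tagged count and does
  //   lea esp, [esp + tagged * (word / 2)]
  // which skips exactly padding_words full words:
  assert(static_cast<std::size_t>(tagged) * (kWordSize / 2) ==
         static_cast<std::size_t>(padding_words) * kWordSize);
  assert(SmiUntag(tagged) == padding_words);
  return 0;
}
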

16
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1649,8 +1649,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_constant_fast_elements =
IsFastObjectElementsKind(constant_elements_kind);
bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1661,7 +1660,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// If the elements are already FAST_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@ -1673,9 +1672,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// If the elements are already FAST_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@ -1703,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
if (IsFastObjectElementsKind(constant_elements_kind)) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
if (constant_elements_kind == FAST_ELEMENTS) {
// Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
// transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));

38
deps/v8/src/ia32/ic-ia32.cc

@ -889,25 +889,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
&non_double_value,
DONT_DO_SMI_CHECK);
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1622,7 +1622,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
@ -1727,12 +1727,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
void PatchInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@ -1753,18 +1753,14 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
address, test_instruction_address, delta);
}
// Patch with a short conditional jump. Enabling means switching from a short
// jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
// reverse operation of that.
// Patch with a short conditional jump. There must be a
// short jump-if-carry/not-carry at this position.
Address jmp_address = test_instruction_address - delta;
ASSERT((check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ||
*jmp_address == Assembler::kJcShortOpcode)
: (*jmp_address == Assembler::kJnzShortOpcode ||
*jmp_address == Assembler::kJzShortOpcode));
Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
: (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
*jmp_address == Assembler::kJcShortOpcode);
Condition cc = *jmp_address == Assembler::kJncShortOpcode
? not_zero
: zero;
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
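
The patch above works because an ia32 short conditional jump encodes as 0x70 | condition in a single byte, so switching the inlined smi check between a carry test and a zero test is one byte write. A sketch of that byte-level flip as the rolled-back version performs it; the condition encodings are the real ia32 values, the helper itself is illustrative.

// Condition encodings are the real ia32 values; the helper is illustrative.
#include <cassert>
#include <cstdint>

constexpr uint8_t kJccShortPrefix = 0x70;
constexpr uint8_t kCarry = 2;     // jc  = 0x72
constexpr uint8_t kNotCarry = 3;  // jnc = 0x73
constexpr uint8_t kZero = 4;      // jz  = 0x74
constexpr uint8_t kNotZero = 5;   // jnz = 0x75

// Rewrites the short jump guarding the inlined smi check: the compiled code
// starts out testing the carry flag and is switched to testing the zero flag
// once the smi fast path is enabled.
uint8_t PatchInlinedSmiCheck(uint8_t old_opcode) {
  assert(old_opcode == (kJccShortPrefix | kCarry) ||
         old_opcode == (kJccShortPrefix | kNotCarry));
  uint8_t cc = (old_opcode == (kJccShortPrefix | kNotCarry)) ? kNotZero : kZero;
  return kJccShortPrefix | cc;  // single-byte patch, jump length unchanged
}
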

163
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -2274,35 +2274,40 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register result = ToRegister(instr->result());
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(no_condition, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
if (map_count == 0) {
ASSERT(instr->hydrogen()->need_generic());
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
Label done;
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
for (int i = 0; i < map_count - 1; ++i) {
Handle<Map> map = instr->hydrogen()->types()->at(i);
__ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
if (last && !need_generic) {
DeoptimizeIf(not_equal, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
} else {
Label next;
__ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
__ j(not_equal, &next, Label::kNear);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ jmp(&done, Label::kNear);
__ bind(&next);
}
}
if (need_generic) {
Handle<Map> map = instr->hydrogen()->types()->last();
__ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
if (instr->hydrogen()->need_generic()) {
Label generic;
__ j(not_equal, &generic, Label::kNear);
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ jmp(&done, Label::kNear);
__ bind(&generic);
__ mov(ecx, name);
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
DeoptimizeIf(not_equal, instr->environment());
EmitLoadFieldOrConstantFunction(result, object, map, name);
}
__ bind(&done);
}
}
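
The code generated above is a map-dispatch chain: compare the receiver's map against each map seen by the IC, load the field directly on a match, and either fall through to a generic load or deoptimize when nothing matches. The same control structure in plain C++, with stand-in types instead of emitted compares and branches:

// Stand-in types; the generated code uses map compares and branches instead.
#include <utility>
#include <vector>

struct Map {};
struct Object { const Map* map; };

using FieldLoad = int (*)(const Object&);

// Returns true via a known map or the generic path; returns false where the
// generated code would deoptimize (last compare fails and no generic case).
bool PolymorphicLoad(const Object& receiver,
                     const std::vector<std::pair<const Map*, FieldLoad>>& cases,
                     FieldLoad generic_load,  // may be null
                     int* result) {
  for (const auto& entry : cases) {
    if (receiver.map == entry.first) {  // the emitted cmp / j(not_equal, next)
      *result = entry.second(receiver);
      return true;
    }
  }
  if (generic_load != nullptr) {
    *result = generic_load(receiver);   // LoadIC_Initialize fallback
    return true;
  }
  return false;                         // DeoptimizeIf(not_equal, ...)
}
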
@ -2377,10 +2382,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Map::kElementsKindMask);
__ shr(temp, Map::kElementsKindShift);
__ cmp(temp, GetInitialFastElementsKind());
__ j(less, &fail, Label::kNear);
__ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
__ j(less_equal, &ok, Label::kNear);
__ cmp(temp, FAST_ELEMENTS);
__ j(equal, &ok, Label::kNear);
__ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less, &fail, Label::kNear);
__ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
@ -2423,11 +2426,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ mov(result,
BuildFastArrayOperand(instr->elements(),
instr->key(),
BuildFastArrayOperand(instr->elements(), instr->key(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
FixedArray::kHeaderSize - kHeapObjectTag));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2441,24 +2442,18 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
offset);
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ movdbl(result, double_load_operand);
}
@ -2467,8 +2462,7 @@ Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index) {
uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
@ -2477,14 +2471,10 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort("array index constant value too big");
}
return Operand(elements_pointer_reg,
((constant_value + additional_index) << shift_size)
+ offset);
constant_value * (1 << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
offset + (additional_index << shift_size));
return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
}
}
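
BuildFastArrayOperand computes a displacement of the form header_offset + (key << shift) from the start of the elements store; the additional_index parameter dropped by this rollback was just the dehoisted constant folded into that displacement. The arithmetic, written out as plain integer math rather than assembler operands:

// Plain integer math; the real function returns an assembler Operand.
#include <cstdint>

// Element displacement for a constant key: the base register still points at
// the elements store, everything constant is folded into one displacement.
uint32_t ElementDisplacement(uint32_t constant_key,
                             uint32_t additional_index,  // dehoisted constant
                             int shift_size,             // log2 of element size
                             uint32_t header_offset) {
  return ((constant_key + additional_index) << shift_size) + header_offset;
}

// With a register key the key contributes through the hardware scale factor
// and only the constant part lands in the displacement.
uint32_t RegisterKeyDisplacement(uint32_t additional_index,
                                 int shift_size,
                                 uint32_t header_offset) {
  return header_offset + (additional_index << shift_size);
}
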
@ -2493,10 +2483,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
instr->key(),
elements_kind,
0,
instr->additional_index()));
instr->key(), elements_kind, 0));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
@ -2532,12 +2519,9 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -2941,13 +2925,11 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
} else {
Label negative_sign;
Label done;
// Deoptimize on unordered.
// Deoptimize on negative numbers.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr->environment());
__ j(below, &negative_sign, Label::kNear);
DeoptimizeIf(below, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero.
@ -2963,21 +2945,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, Operand(input_reg));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
DeoptimizeIf(equal, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here
__ bind(&negative_sign);
// Truncate, then compare and compensate
__ cvttsd2si(output_reg, Operand(input_reg));
__ cvtsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ sub(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr->environment());
__ bind(&done);
}
}
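
The negative-number path removed above implements floor with a truncating conversion: cvttsd2si rounds toward zero, so a negative non-integral input comes out one too high and is compensated by subtracting one. The same strategy in scalar C++:

// Scalar version of the emitted sequence; cvttsd2si corresponds to the cast.
#include <cassert>
#include <cstdint>

int32_t FloorViaTruncate(double input) {
  int32_t truncated = static_cast<int32_t>(input);                 // rounds toward zero
  if (static_cast<double>(truncated) == input) return truncated;   // already integral
  return input < 0 ? truncated - 1 : truncated;                    // compensate negatives
}

int main() {
  assert(FloorViaTruncate(2.7) == 2);
  assert(FloorViaTruncate(-2.7) == -3);
  assert(FloorViaTruncate(-3.0) == -3);
  return 0;
}
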
@ -3436,10 +3407,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
instr->key(),
elements_kind,
0,
instr->additional_index()));
instr->key(), elements_kind, 0));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
@ -3463,12 +3431,9 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3483,21 +3448,31 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Operand operand = BuildFastArrayOperand(
instr->object(),
instr->key(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
__ mov(operand, value);
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ mov(FieldOperand(elements, offset), value);
} else {
__ mov(FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize),
value);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key, operand);
__ lea(key,
FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
__ RecordWrite(elements,
key,
value,
@ -3525,11 +3500,8 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ movdbl(double_store_operand, value);
}
@ -3560,23 +3532,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ mov(new_map_reg, to_map);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register object_reg = ToRegister(instr->object());
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp_reg(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp_reg()), kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
@ -4436,9 +4407,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
@ -4600,9 +4570,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,

3
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -242,8 +242,7 @@ class LCodeGen BASE_EMBEDDED {
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
uint32_t offset,
uint32_t additional_index = 0);
uint32_t offset);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);

8
deps/v8/src/ia32/lithium-ia32.cc

@ -1990,7 +1990,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
@ -2092,9 +2093,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();

12
deps/v8/src/ia32/lithium-ia32.h

@ -1238,13 +1238,13 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
LLoadKeyedFastDoubleElement(LOperand* elements,
LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
@ -1255,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@ -1275,7 +1275,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1776,7 +1775,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1799,7 +1797,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -1825,7 +1822,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};

80
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -382,12 +382,10 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastHoleyElementValue);
Map::kMaximumBitField2FastElementValue);
j(above, fail, distance);
}
@ -395,26 +393,23 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastHoleySmiElementValue);
Map::kMaximumBitField2FastSmiOnlyElementValue);
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastHoleyElementValue);
Map::kMaximumBitField2FastElementValue);
j(above, fail, distance);
}
void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastHoleySmiElementValue);
Map::kMaximumBitField2FastSmiOnlyElementValue);
j(above, fail, distance);
}
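
These helpers classify a map's elements kind with a single unsigned byte compare: the kind lives in the top bits of Map's bit field 2 and the kinds being tested have the smallest values, so comparing the whole byte against "the largest value that still encodes the wanted kind range" suffices. A sketch of that trick; the shift amount and kind numbering below are assumptions, not the real Map layout constants.

// Shift amount and kind numbering are assumptions, not the real Map constants.
#include <cstdint>

constexpr int kElementsKindShift = 3;  // kind assumed to sit above 3 flag bits
constexpr uint8_t kFastSmiOnly = 0;
constexpr uint8_t kFastObject = 1;

// Largest bit field 2 byte whose kind bits are still <= `kind`
// (all lower flag bits set).
constexpr uint8_t MaxBitField2ForKind(uint8_t kind) {
  return static_cast<uint8_t>(((kind + 1) << kElementsKindShift) - 1);
}

bool HasFastElements(uint8_t bit_field2) {         // cf. CheckFastElements
  return bit_field2 <= MaxBitField2ForKind(kFastObject);
}

bool HasFastSmiOnlyElements(uint8_t bit_field2) {  // cf. CheckFastSmiOnlyElements
  return bit_field2 <= MaxBitField2ForKind(kFastSmiOnly);
}
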
@ -498,18 +493,24 @@ void MacroAssembler::CompareMap(Register obj,
CompareMapMode mode) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind, NULL);
if (!current_map) break;
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(current_map));
Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(transitioned_double_map));
}
}
}
@ -2160,38 +2161,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
mov(scratch, Operand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmp(map_in_out, FieldOperand(scratch, offset));
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
j(not_equal, no_map_match);
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
mov(map_in_out, FieldOperand(scratch, offset));
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
mov(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
@ -2576,7 +2566,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
CodePatcher::CodePatcher(byte* address, int size)
: address_(address),
size_(size),
masm_(NULL, address, size + Assembler::kGap) {
masm_(Isolate::Current(), address, size + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap on order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
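For illustration, a minimal sketch of how such a patcher is typically driven (the address, the one-byte size, and the nop() chosen here are hypothetical, not taken from this diff): the embedded assembler writes exactly the requested number of bytes over the old code.

// Hypothetical usage sketch of the CodePatcher described above.
{
  CodePatcher patcher(patch_address, 1);   // patch exactly 1 byte at patch_address
  patcher.masm()->nop();                   // emit the replacement instruction
}  // the destructor checks that exactly 'size' bytes were emitted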

5
deps/v8/src/ia32/macro-assembler-ia32.h

@ -235,8 +235,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out,
bool can_have_holes);
Register map_out);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@ -358,7 +357,7 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
void CheckFastSmiOnlyElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);

148
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -42,30 +42,28 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - edx : Current character. Must be loaded using LoadCurrentCharacter
* before using any of the dispatch methods. Temporarily stores the
* index of capture start after a matching pass for a global regexp.
* - edi : Current position in input, as negative offset from end of string.
* - edx : current character. Must be loaded using LoadCurrentCharacter
* before using any of the dispatch methods.
* - edi : current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - esi : end of input (points to byte after last character in input).
* - ebp : Frame pointer. Used to access arguments, local variables and
* - ebp : frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - esp : Points to tip of C stack.
* - ecx : Points to tip of backtrack stack
* - esp : points to tip of C stack.
* - ecx : points to tip of backtrack stack
*
* The registers eax and ebx are free to use for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
* - Isolate* isolate (address of the current isolate)
* - Isolate* isolate (Address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
* - stack_area_base (high end of the memory area to use as
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (address of end of string)
* - start of input (address of first character in string)
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - start index (character index of start)
* - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
@ -74,10 +72,9 @@ namespace internal {
* - backup of caller esi
* - backup of caller edi
* - backup of caller ebx
* - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
* - register 0 ebp[-4] (only positions must be stored in the first
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
*
@ -709,16 +706,13 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
__ Set(eax, Immediate(FAILURE));
}
ASSERT(FAILURE == 0); // Return value for failure is zero.
__ Set(eax, Immediate(0));
__ jmp(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label return_eax;
// Finalize code - write the entry point code now we know how many
// registers we need.
@ -737,7 +731,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Number of successful matches in a global regexp.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
@ -757,13 +750,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(eax, EXCEPTION);
__ jmp(&return_eax);
__ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
__ or_(eax, eax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_eax);
__ j(not_zero, &exit_label_);
__ bind(&stack_ok);
// Load start index for later use.
@ -790,55 +783,41 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
#ifdef WIN32
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
const int kRegistersPerPage = kPageSize / kPointerSize;
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
#endif // WIN32
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
__ j(not_equal, &load_char_start_regexp, Label::kNear);
__ mov(current_character(), '\n');
__ jmp(&start_regexp, Label::kNear);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
__ mov(ecx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, 0), eax);
__ mov(Operand(ebp, ecx, times_1, +0), eax);
__ sub(ecx, Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(register_location(i), eax);
}
}
// Ensure that we have written to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
const int kRegistersPerPage = kPageSize / kPointerSize;
for (int i = num_saved_registers_ + kRegistersPerPage - 1;
i < num_registers_;
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
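The intent of the loop above can be restated as a standalone sketch (not V8 code; the 4 KB page size and a flat buffer stand in for the real register area): touching one word per page, in the order the area will be used, keeps Windows stack growth sequential so no later store lands on a page that was skipped.

// Standalone illustration of ordered stack-page touching.
static void TouchPages(char* area, int size_in_bytes) {
  const int kPageSize = 4096;                 // assumed page size, as above
  for (int offset = 0; offset < size_in_bytes; offset += kPageSize) {
    area[offset] = 0;                         // one write per page, in order
  }
}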
// Initialize backtrack stack pointer.
__ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
// Load previous char as initial value of current-character.
Label at_start;
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
__ j(equal, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ mov(current_character(), '\n');
__ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@ -857,10 +836,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
if (i == 0 && global()) {
// Keep capture start in edx for the zero-length check later.
__ mov(edx, eax);
}
// Convert to index from start of string, not end.
__ add(eax, ecx);
if (mode_ == UC16) {
@ -869,54 +844,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(Operand(ebx, i * kPointerSize), eax);
}
}
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ inc(Operand(ebp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ mov(ecx, Operand(ebp, kNumOutputRegisters));
__ sub(ecx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmp(ecx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ mov(Operand(ebp, kNumOutputRegisters), ecx);
// Advance the location for output.
__ add(Operand(ebp, kRegisterOutput),
Immediate(num_saved_registers_ * kPointerSize));
// Prepare eax to initialize registers with its value in the next run.
__ mov(eax, Operand(ebp, kInputStartMinusOne));
// Special case for zero-length matches.
// edx: capture start index
__ cmp(edi, edx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// edi (offset from the end) is zero if we already reached the end.
__ test(edi, edi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
__ add(edi, Immediate(2));
} else {
__ inc(edi);
}
__ jmp(&load_char_start_regexp);
} else {
__ mov(eax, Immediate(SUCCESS));
}
}
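The zero-length-match handling shown above (dropped by this rollback together with the rest of the global-regexp support) exists because a global matcher that restarts at the same position after an empty match would never terminate. A standalone sketch of the rule, with the caveat that the real code bumps by two bytes in UC16 mode:

// Count matches of a pattern that can match the empty string at every position.
int CountEmptyMatches(const char* subject, int length) {
  (void) subject;               // the sketch never inspects the characters
  int position = 0;
  int matches = 0;
  while (position <= length) {
    int match_length = 0;       // pretend the pattern matched zero characters here
    matches++;
    if (match_length == 0) {
      position++;               // advance past the zero-length match
    } else {
      position += match_length;
    }
  }
  return matches;               // "abc" yields 4 matches: before a, b, c and at the end
}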
// Exit and return eax
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
__ mov(eax, Operand(ebp, kSuccessfulCaptures));
}
__ bind(&return_eax);
// Skip esp past regexp registers.
__ lea(esp, Operand(ebp, kBackup_ebx));
// Restore callee-save registers.
@ -946,7 +877,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ or_(eax, eax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_eax);
__ j(not_zero, &exit_label_);
__ pop(edi);
__ pop(backtrack_stackpointer());
@ -993,7 +924,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(eax, EXCEPTION);
__ jmp(&return_eax);
__ jmp(&exit_label_);
}
CodeDesc code_desc;
@ -1112,9 +1043,8 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
}
bool RegExpMacroAssemblerIA32::Succeed() {
void RegExpMacroAssemblerIA32::Succeed() {
__ jmp(&success_label_);
return global();
}

13
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2008-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -111,7 +111,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@ -135,11 +135,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
@ -148,8 +144,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
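As a worked example of the layout restored above, assume a 32-bit build (kPointerSize == 4) and take kFramePointer as 0 (an assumption; its definition lies outside the lines shown):

// Illustrative recomputation only; kPointerSize == 4 and kFramePointer == 0 are assumptions.
const int kPointerSize = 4;
const int kFramePointer = 0;
const int kBackup_esi = kFramePointer - kPointerSize;          // ebp - 4
const int kBackup_edi = kBackup_esi - kPointerSize;            // ebp - 8
const int kBackup_ebx = kBackup_edi - kPointerSize;            // ebp - 12
const int kInputStartMinusOne = kBackup_ebx - kPointerSize;    // ebp - 16
const int kRegisterZero = kInputStartMinusOne - kPointerSize;  // ebp - 20
// So register i lives at ebp - 20 - 4 * i, which is what register_location(i) computes.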

8
deps/v8/src/ia32/simulator-ia32.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -40,12 +40,12 @@ namespace internal {
typedef int (*regexp_matcher)(String*, int, const byte*,
const byte*, int*, int, Address, int, Isolate*);
const byte*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
// expect eight int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
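To make the eight-argument entry shape restored above concrete, here is a standalone, runnable sketch (not V8 code: plain char pointers and void* stand in for String*, const byte*, Address and Isolate*):

#include <cstdio>

typedef int (*regexp_matcher)(const char* input_string, int start_index,
                              const char* input_start, const char* input_end,
                              int* output, void* stack_base,
                              int direct_call, void* isolate);

static int FakeMatcher(const char* input_string, int start_index,
                       const char* input_start, const char* input_end,
                       int* output, void* stack_base,
                       int direct_call, void* isolate) {
  (void) input_string; (void) stack_base; (void) direct_call; (void) isolate;
  // Pretend the whole remaining input matched and record one capture pair.
  output[0] = start_index;
  output[1] = static_cast<int>(input_end - input_start);
  return 1;  // one successful match
}

int main() {
  const char* subject = "abcdef";
  int captures[2] = {0, 0};
  regexp_matcher entry = FakeMatcher;
  int result = entry(subject, 1, subject, subject + 6, captures, 0, 0, 0);
  std::printf("result=%d captures=[%d, %d]\n", result, captures[0], captures[1]);
  return 0;
}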
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \

27
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1462,31 +1462,16 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
__ CheckFastSmiOnlyElements(ebx, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&try_holey_map);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
ebx,
edi,
&call_builtin);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
@ -3833,7 +3818,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
__ JumpIfNotSmi(eax, &transition_elements_kind);
}
@ -3858,7 +3843,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
if (IsFastSmiElementsKind(elements_kind)) {
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
@ -3866,7 +3851,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
times_half_pointer_size,
FixedArray::kHeaderSize), eax);
} else {
ASSERT(IsFastObjectElementsKind(elements_kind));
ASSERT(elements_kind == FAST_ELEMENTS);
// Do the store and update the write barrier.
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size

85
deps/v8/src/ic.cc

@ -352,9 +352,9 @@ void IC::Clear(Address address) {
return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
case Code::COMPARE_IC: return CompareIC::Clear(address, target);
case Code::UNARY_OP_IC:
case Code::BINARY_OP_IC:
case Code::COMPARE_IC:
case Code::TO_BOOLEAN_IC:
// Clearing these is tricky and does not
// make any performance difference.
@ -365,8 +365,9 @@ void IC::Clear(Address address) {
void CallICBase::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
State state = target->ic_state();
if (state == UNINITIALIZED) return;
Code* code =
Isolate::Current()->stub_cache()->FindCallInitialize(
target->arguments_count(),
@ -409,17 +410,6 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
}
void CompareIC::Clear(Address address, Code* target) {
// Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
if (target->major_key() != CodeStub::CompareIC) return;
// Only clear CompareICs that can retain objects.
if (target->compare_state() != KNOWN_OBJECTS) return;
Token::Value op = CompareIC::ComputeOperation(target);
SetTargetAtAddress(address, GetRawUninitialized(op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
@ -1644,7 +1634,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
return string_stub();
} else {
ASSERT(receiver_map->has_dictionary_elements() ||
receiver_map->has_fast_smi_or_object_elements() ||
receiver_map->has_fast_elements() ||
receiver_map->has_fast_smi_only_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@ -1659,7 +1650,8 @@ Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> generic_stub) {
if (receiver->HasFastSmiOrObjectElements() ||
if (receiver->HasFastElements() ||
receiver->HasFastSmiOnlyElements() ||
receiver->HasExternalArrayElements() ||
receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
@ -1679,26 +1671,15 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
break;
case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_ELEMENTS);
case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_DOUBLE_ELEMENTS);
case KeyedIC::LOAD:
case KeyedIC::STORE_NO_TRANSITION:
case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
UNREACHABLE();
break;
}
default:
UNREACHABLE();
return Handle<Map>::null();
}
}
@ -1758,56 +1739,32 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
if (receiver->HasFastSmiOnlyElements()) {
if (value->IsHeapNumber()) {
if (receiver->HasFastHoleyElements()) {
return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
} else {
return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
}
}
if (value->IsHeapObject()) {
if (receiver->HasFastHoleyElements()) {
return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
} else {
return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
}
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
if (receiver->HasFastHoleyElements()) {
return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
} else {
return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
}
}
}
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
if (receiver->HasFastSmiElements()) {
if (receiver->HasFastSmiOnlyElements()) {
if (value->IsHeapNumber()) {
if (receiver->HasFastHoleyElements()) {
return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
} else {
return STORE_TRANSITION_SMI_TO_DOUBLE;
}
} else if (value->IsHeapObject()) {
if (receiver->HasFastHoleyElements()) {
return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
} else {
return STORE_TRANSITION_SMI_TO_OBJECT;
}
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
if (receiver->HasFastHoleyElements()) {
return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
} else {
return STORE_TRANSITION_DOUBLE_TO_OBJECT;
}
}
}
return STORE_NO_TRANSITION;
}
}
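Stripped of the IC plumbing, the store-transition choice above reduces to a small decision table: smi-only arrays widen to double when a heap number is stored and to object when any other heap object is stored; double arrays widen to object when the value is neither a smi nor a heap number; everything else stays put. A hedged standalone sketch (the names are illustrative, not the V8 API):

enum Kind { SMI_ONLY, DOUBLE, OBJECT };

// Which elements kind must the receiver transition to before storing the value?
Kind TargetKind(Kind current, bool value_is_smi, bool value_is_heap_number) {
  if (current == SMI_ONLY) {
    if (value_is_heap_number) return DOUBLE;
    if (!value_is_smi) return OBJECT;        // some other heap object
  } else if (current == DOUBLE) {
    if (!value_is_smi && !value_is_heap_number) return OBJECT;
  }
  return current;                            // no transition needed
}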
@ -2439,7 +2396,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
// Activate inlined smi code.
if (previous_type == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
PatchInlinedSmiCode(ic.address());
}
}
@ -2500,14 +2457,6 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
}
Code* CompareIC::GetRawUninitialized(Token::Value op) {
ICCompareStub stub(op, UNINITIALIZED);
Code* code = NULL;
CHECK(stub.FindCodeInCache(&code));
return code;
}
Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
ICCompareStub stub(op, UNINITIALIZED);
return stub.GetCode();
@ -2522,12 +2471,6 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
}
Token::Value CompareIC::ComputeOperation(Code* target) {
ASSERT(target->major_key() == CodeStub::CompareIC);
return static_cast<Token::Value>(target->compare_operation());
}
const char* CompareIC::GetStateName(State state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";

20
deps/v8/src/ic.h

@ -378,16 +378,10 @@ class KeyedIC: public IC {
STORE_TRANSITION_SMI_TO_OBJECT,
STORE_TRANSITION_SMI_TO_DOUBLE,
STORE_TRANSITION_DOUBLE_TO_OBJECT,
STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
STORE_AND_GROW_NO_TRANSITION,
STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
};
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
@ -800,9 +794,6 @@ class CompareIC: public IC {
// Helper function for determining the state of a compare IC.
static State ComputeState(Code* target);
// Helper function for determining the operation a compare IC is for.
static Token::Value ComputeOperation(Code* target);
static const char* GetStateName(State state);
private:
@ -813,13 +804,7 @@ class CompareIC: public IC {
Condition GetCondition() const { return ComputeCondition(op_); }
State GetState() { return ComputeState(target()); }
static Code* GetRawUninitialized(Token::Value op);
static void Clear(Address address, Code* target);
Token::Value op_;
friend class IC;
};
@ -832,8 +817,7 @@ class ToBooleanIC: public IC {
// Helper for BinaryOpIC and CompareIC.
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
void PatchInlinedSmiCode(Address address);
} } // namespace v8::internal

26
deps/v8/src/incremental-marking-inl.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
int64_t old_bytes_rescanned = bytes_rescanned_;
bytes_rescanned_ = old_bytes_rescanned + obj_size;
if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
// If we have queued twice the heap size for rescanning then we are
// going around in circles, scanning the same objects again and again
// as the program mutates the heap faster than we can incrementally
@ -118,29 +118,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
Marking::WhiteToGrey(mark_bit);
WhiteToGrey(obj, mark_bit);
marking_deque_.PushGrey(obj);
}
bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
WhiteToGreyAndPush(obj, mark_bit);
return true;
}
return false;
}
bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
return true;
}
return false;
void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
Marking::WhiteToGrey(mark_bit);
}

43
deps/v8/src/incremental-marking.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -42,7 +42,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
state_(STOPPED),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(false),
marker_(this, heap->mark_compact_collector()),
steps_count_(0),
steps_took_(0),
longest_step_(0.0),
@ -664,22 +663,6 @@ void IncrementalMarking::Hurry() {
} else if (map == global_context_map) {
// Global contexts have weak fields.
VisitGlobalContext(Context::cast(obj), &marking_visitor);
} else if (map->instance_type() == MAP_TYPE) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's
// transitions and back pointers in a special way to make these links
// weak. Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
marking_visitor.VisitPointers(
HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
} else {
obj->Iterate(&marking_visitor);
}
@ -824,6 +807,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
Map* map = obj->map();
if (map == filler_map) continue;
if (obj->IsMap()) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
}
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@ -841,22 +830,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
VisitGlobalContext(ctx, &marking_visitor);
} else if (map->instance_type() == MAP_TYPE) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's
// transitions and back pointers in a special way to make these links
// weak. Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
marking_visitor.VisitPointers(
HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
} else if (map->instance_type() == JS_FUNCTION_TYPE) {
marking_visitor.VisitPointers(
HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
@ -978,7 +951,7 @@ void IncrementalMarking::ResetStepCounters() {
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
}
} } // namespace v8::internal

15
deps/v8/src/incremental-marking.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -154,6 +154,8 @@ class IncrementalMarking {
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
// Does white->black or keeps gray or black color. Returns true if converting
// white to black.
inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@ -167,16 +169,6 @@ class IncrementalMarking {
return true;
}
// Marks the object grey and pushes it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for incremental marking only.
INLINE(bool MarkObjectAndPush(HeapObject* obj));
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for incremental marking only.
INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
inline int steps_count() {
return steps_count_;
}
@ -268,7 +260,6 @@ class IncrementalMarking {
VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
Marker<IncrementalMarking> marker_;
int steps_count_;
double steps_took_;

2
deps/v8/src/isolate.h

@ -965,7 +965,7 @@ class Isolate {
// SerializerDeserializer state.
static const int kPartialSnapshotCacheCapacity = 1400;
static const int kJSRegexpStaticOffsetsVectorSize = 128;
static const int kJSRegexpStaticOffsetsVectorSize = 50;
Address external_callback() {
return thread_local_top_.external_callback_;

88
deps/v8/src/jsregexp.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -324,7 +324,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
index)));
if (index == -1) return isolate->factory()->null_value();
}
ASSERT(last_match_info->HasFastObjectElements());
ASSERT(last_match_info->HasFastElements());
{
NoHandleAllocation no_handles;
@ -429,7 +429,6 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
RegExpEngine::CompilationResult result =
RegExpEngine::Compile(&compile_data,
flags.is_ignore_case(),
flags.is_global(),
flags.is_multiline(),
pattern,
sample_subject,
@ -516,23 +515,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
}
int RegExpImpl::GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
int registers_per_match,
int* max_matches) {
#ifdef V8_INTERPRETED_REGEXP
// Global loop in interpreted regexp is not implemented. Therefore we choose
// the size of the offsets vector so that it can only store one match.
*max_matches = 1;
return registers_per_match;
#else // V8_INTERPRETED_REGEXP
int size = Max(registers_per_match, OffsetsVector::kStaticOffsetsVectorSize);
*max_matches = size / registers_per_match;
return size;
#endif // V8_INTERPRETED_REGEXP
}
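A worked example of the helper above (dropped by this rollback): assume the 128-entry static offsets vector from the pre-rollback side of this diff and a pattern that needs four registers per match (the whole match plus one capture group, two registers each). Then size = max(4, 128) = 128 and *max_matches = 128 / 4 = 32, i.e. one pass through the generated code can report up to 32 global matches.

// Standalone recomputation under the assumptions above; not the V8 declaration.
int GlobalOffsetsVectorSizeSketch(int registers_per_match, int* max_matches) {
  const int kStaticOffsetsVectorSize = 128;  // value used on the pre-rollback side
  int size = registers_per_match > kStaticOffsetsVectorSize
                 ? registers_per_match
                 : kStaticOffsetsVectorSize;
  *max_matches = size / registers_per_match;  // 128 / 4 == 32
  return size;
}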
int RegExpImpl::IrregexpExecRaw(
RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
@ -634,7 +617,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
OffsetsVector registers(required_registers, isolate);
int res = RegExpImpl::IrregexpExecRaw(
IrregexpResult res = RegExpImpl::IrregexpExecOnce(
jsregexp, subject, previous_index, Vector<int>(registers.vector(),
registers.length()));
if (res == RE_SUCCESS) {
@ -2191,12 +2174,15 @@ int ActionNode::EatsAtLeast(int still_to_find,
void ActionNode::FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
if (type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
} else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
on_success()->FillInBMInfo(offset, bm, not_at_start);
on_success()->FillInBMInfo(
offset, recursion_depth + 1, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
@ -2218,11 +2204,15 @@ int AssertionNode::EatsAtLeast(int still_to_find,
}
void AssertionNode::FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
void AssertionNode::FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
if (type() == AT_START && not_at_start) return;
on_success()->FillInBMInfo(offset, bm, not_at_start);
on_success()->FillInBMInfo(
offset, recursion_depth + 1, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@ -2803,14 +2793,20 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
}
void LoopChoiceNode::FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
if (body_can_be_zero_length_) {
void LoopChoiceNode::FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
if (body_can_be_zero_length_ ||
recursion_depth > RegExpCompiler::kMaxRecursion ||
budget <= 0) {
bm->SetRest(offset);
SaveBMInfo(bm, not_at_start, offset);
return;
}
ChoiceNode::FillInBMInfo(offset, bm, not_at_start);
ChoiceNode::FillInBMInfo(
offset, recursion_depth + 1, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@ -2912,7 +2908,7 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new BoyerMooreLookahead(eats_at_least, compiler);
FillInBMInfo(0, bm, not_at_start);
FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
}
@ -3850,7 +3846,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
BoyerMooreLookahead* bm =
new BoyerMooreLookahead(eats_at_least, compiler);
GuardedAlternative alt0 = alternatives_->at(0);
alt0.node()->FillInBMInfo(0, bm, not_at_start);
alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
}
} else {
@ -5589,8 +5585,11 @@ void Analysis::VisitAssertion(AssertionNode* that) {
}
void BackReferenceNode::FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
void BackReferenceNode::FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Working out the set of characters that a backreference can match is too
// hard, so we just say that any character can match.
bm->SetRest(offset);
@ -5602,9 +5601,13 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
RegExpMacroAssembler::kTableSize);
void ChoiceNode::FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
void ChoiceNode::FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
ZoneList<GuardedAlternative>* alts = alternatives();
budget = (budget - 1) / alts->length();
for (int i = 0; i < alts->length(); i++) {
GuardedAlternative& alt = alts->at(i);
if (alt.guards() != NULL && alt.guards()->length() != 0) {
@ -5612,14 +5615,18 @@ void ChoiceNode::FillInBMInfo(
SaveBMInfo(bm, not_at_start, offset);
return;
}
alt.node()->FillInBMInfo(offset, bm, not_at_start);
alt.node()->FillInBMInfo(
offset, recursion_depth + 1, budget, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
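The budget threading visible in the signatures above bounds the Boyer-Moore lookahead analysis: every hop to a successor costs one unit, a ChoiceNode splits what is left evenly across its alternatives, and a subtree that runs out (or exceeds kMaxRecursion) falls back to SetRest(). Starting from kFillInBMBudget = 200, a choice with four alternatives hands each subtree (200 - 1) / 4 = 49 units. A standalone sketch of that traversal shape (not V8 code):

struct Node {
  int num_alternatives;   // 0 for a leaf
  Node** alternatives;
};

void Analyze(Node* node, int budget) {
  if (budget <= 0) {
    // Out of budget: give up on precise information
    // (the real code calls bm->SetRest(offset) here).
    return;
  }
  if (node->num_alternatives == 0) return;
  int share = (budget - 1) / node->num_alternatives;  // same split as ChoiceNode above
  for (int i = 0; i < node->num_alternatives; i++) {
    Analyze(node->alternatives[i], share);
  }
}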
void TextNode::FillInBMInfo(
int initial_offset, BoyerMooreLookahead* bm, bool not_at_start) {
void TextNode::FillInBMInfo(int initial_offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
if (initial_offset >= bm->length()) return;
int offset = initial_offset;
int max_char = bm->max_char();
@ -5673,6 +5680,8 @@ void TextNode::FillInBMInfo(
return;
}
on_success()->FillInBMInfo(offset,
recursion_depth + 1,
budget - 1,
bm,
true); // Not at start after a text node.
if (initial_offset == 0) set_bm_info(not_at_start, bm);
@ -5797,7 +5806,6 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
RegExpEngine::CompilationResult RegExpEngine::Compile(
RegExpCompileData* data,
bool ignore_case,
bool is_global,
bool is_multiline,
Handle<String> pattern,
Handle<String> sample_subject,
@ -5901,8 +5909,6 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
macro_assembler.SetCurrentPositionFromEnd(max_length);
}
macro_assembler.set_global(is_global);
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,

100
deps/v8/src/jsregexp.h

@ -109,19 +109,13 @@ class RegExpImpl {
static int IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject);
// Calculate the size of offsets vector for the case of global regexp
// and the number of matches this vector is able to store.
static int GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
int registers_per_match,
int* max_matches);
// Execute a regular expression on the subject, starting from index.
// If matching succeeds, return the number of matches. This can be larger
// than one in the case of global regular expressions.
// The captures and subcaptures are stored into the registers vector.
// Execute a regular expression once on the subject, starting from
// character "index".
// If successful, returns RE_SUCCESS and sets the capture positions
// in the first registers.
// If matching fails, returns RE_FAILURE.
// If execution fails, sets a pending exception and returns RE_EXCEPTION.
static int IrregexpExecRaw(Handle<JSRegExp> regexp,
static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int> registers);
@ -580,9 +574,14 @@ class RegExpNode: public ZoneObject {
// Collects information on the possible code units (mod 128) that can match if
// we look forward. This is used for a Boyer-Moore-like string searching
// implementation. TODO(erikcorry): This should share more code with
// EatsAtLeast, GetQuickCheckDetails.
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
// EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
// the number of nodes we are willing to look at in order to create this data.
static const int kFillInBMBudget = 200;
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
UNREACHABLE();
}
@ -681,9 +680,13 @@ class SeqRegExpNode: public RegExpNode {
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
virtual RegExpNode* FilterASCII(int depth);
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
on_success_->FillInBMInfo(offset, bm, not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
on_success_->FillInBMInfo(
offset, recursion_depth + 1, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
@ -736,8 +739,11 @@ class ActionNode: public SeqRegExpNode {
return on_success()->GetQuickCheckDetails(
details, compiler, filled_in, not_at_start);
}
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
Type type() { return type_; }
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
@ -805,8 +811,11 @@ class TextNode: public SeqRegExpNode {
virtual int GreedyLoopTextLength();
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
void CalculateOffsets();
virtual RegExpNode* FilterASCII(int depth);
@ -865,8 +874,11 @@ class AssertionNode: public SeqRegExpNode {
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
AssertionNodeType type() { return type_; }
void set_type(AssertionNodeType type) { type_ = type; }
@ -903,8 +915,11 @@ class BackReferenceNode: public SeqRegExpNode {
bool not_at_start) {
return;
}
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
private:
int start_reg_;
@ -928,8 +943,11 @@ class EndNode: public RegExpNode {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Returning 0 from EatsAtLeast should ensure we never get here.
UNREACHABLE();
}
@ -1018,8 +1036,11 @@ class ChoiceNode: public RegExpNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
bool being_calculated() { return being_calculated_; }
bool not_at_start() { return not_at_start_; }
@ -1068,9 +1089,13 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start) {
alternatives_->at(1).node()->FillInBMInfo(offset, bm, not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
alternatives_->at(1).node()->FillInBMInfo(
offset, recursion_depth + 1, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
// For a negative lookahead we don't emit the quick check for the
@ -1100,8 +1125,11 @@ class LoopChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(
int offset, BoyerMooreLookahead* bm, bool not_at_start);
virtual void FillInBMInfo(int offset,
int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
@ -1551,7 +1579,6 @@ class RegExpEngine: public AllStatic {
static CompilationResult Compile(RegExpCompileData* input,
bool ignore_case,
bool global,
bool multiline,
Handle<String> pattern,
Handle<String> sample_subject,
@ -1580,8 +1607,7 @@ class OffsetsVector {
inline int* vector() { return vector_; }
inline int length() { return offsets_vector_length_; }
static const int kStaticOffsetsVectorSize =
Isolate::kJSRegexpStaticOffsetsVectorSize;
static const int kStaticOffsetsVectorSize = 50;
private:
static Address static_offsets_vector_address(Isolate* isolate) {

8
deps/v8/src/list-inl.h

@ -136,14 +136,6 @@ bool List<T, P>::RemoveElement(const T& elm) {
}
template<typename T, class P>
void List<T, P>::Allocate(int length) {
DeleteData(data_);
Initialize(length);
length_ = length;
}
template<typename T, class P>
void List<T, P>::Clear() {
DeleteData(data_);

3
deps/v8/src/list.h

@ -117,9 +117,6 @@ class List {
// pointer type. Returns the removed element.
INLINE(T RemoveLast()) { return Remove(length_ - 1); }
// Deletes current list contents and allocates space for 'length' elements.
INLINE(void Allocate(int length));
// Clears the list by setting the length to zero. Even if T is a
// pointer type, clearing the list doesn't delete the entries.
INLINE(void Clear());

7
deps/v8/src/lithium.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -225,12 +225,9 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
return 2;
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
return 3;
case FAST_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;

60
deps/v8/src/liveedit.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -30,7 +30,6 @@
#include "liveedit.h"
#include "code-stubs.h"
#include "compilation-cache.h"
#include "compiler.h"
#include "debug.h"
@ -1476,36 +1475,26 @@ static const char* DropFrames(Vector<StackFrame*> frames,
// Check the nature of the top frame.
Isolate* isolate = Isolate::Current();
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding;
if (pre_top_frame_code->is_inline_cache_stub() &&
pre_top_frame_code->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
} else if (pre_top_frame_code ==
isolate->debug()->debug_break_slot()) {
// OK, we can drop debug break slot.
*mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(
Builtins::kFrameDropper_LiveEdit)) {
// OK, we can drop our own code.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
frame_has_padding = false;
} else if (pre_top_frame_code ==
isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
*mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
} else if (pre_top_frame_code->kind() == Code::STUB &&
pre_top_frame_code->major_key() == CodeStub::CEntry) {
// Entry from our unit tests on 'debugger' statement.
// It's fine, we support this case.
pre_top_frame_code->major_key()) {
// Entry from our unit tests, it's fine, we support this case.
*mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
// We don't have a padding from 'debugger' statement call.
// Here the stub is CEntry, it's not debug-only and can't be padded.
// If anyone would complain, a proxy padded stub could be added.
frame_has_padding = false;
} else {
return "Unknown structure of stack above changing function";
}
@ -1515,50 +1504,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
- Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame.
+ kPointerSize; // Bigger address end is exclusive.
Address* top_frame_pc_address = top_frame->pc_address();
// top_frame may be damaged below this point. Do not use it.
ASSERT(!(top_frame = NULL));
if (unused_stack_top > unused_stack_bottom) {
if (frame_has_padding) {
int shortage_bytes =
static_cast<int>(unused_stack_top - unused_stack_bottom);
Address padding_start = pre_top_frame->fp() -
Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
Address padding_pointer = padding_start;
Smi* padding_object =
Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
while (Memory::Object_at(padding_pointer) == padding_object) {
padding_pointer -= kPointerSize;
}
int padding_counter =
Smi::cast(Memory::Object_at(padding_pointer))->value();
if (padding_counter * kPointerSize < shortage_bytes) {
return "Not enough space for frame dropper frame "
"(even with padding frame)";
}
Memory::Object_at(padding_pointer) =
Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
StackFrame* pre_pre_frame = frames[top_frame_index - 2];
memmove(padding_start + kPointerSize - shortage_bytes,
padding_start + kPointerSize,
Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
pre_pre_frame->SetCallerFp(pre_top_frame->fp());
unused_stack_top -= shortage_bytes;
STATIC_ASSERT(sizeof(Address) == kPointerSize);
top_frame_pc_address -= shortage_bytes / kPointerSize;
} else {
return "Not enough space for frame dropper frame";
}
}
// Committing now. After this point we should return only NULL value.
@ -1567,7 +1515,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
*top_frame_pc_address = code->entry();
top_frame->set_pc(code->entry());
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
*restarter_frame_function_pointer =

28
deps/v8/src/mark-compact-inl.h

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -52,15 +52,6 @@ void MarkCompactCollector::SetFlags(int flags) {
}
bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
if (MarkObjectWithoutPush(obj)) {
marking_deque_.PushBlack(obj);
return true;
}
return false;
}
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
@ -71,13 +62,16 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
}
bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
SetMark(obj, mark_bit);
return true;
}
return false;
bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
MarkBit mark = Marking::MarkBitFrom(object);
bool old_mark = mark.Get();
if (!old_mark) SetMark(object, mark);
return old_mark;
}
void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
}
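The pair of helpers above follows a test-and-set idiom: read the mark bit, set it if it was clear, and push only newly marked objects onto the marking deque. A standalone sketch (not V8 code; a set stands in for the per-object mark bits):

#include <deque>
#include <set>

struct MarkingState {
  std::set<void*> marked;            // stand-in for per-object mark bits
  std::deque<void*> marking_deque;

  // Returns the *old* mark, mirroring MarkObjectWithoutPush above.
  bool MarkWithoutPush(void* object) {
    bool old_mark = marked.count(object) != 0;
    if (!old_mark) marked.insert(object);
    return old_mark;
  }

  // Push only if the object was not marked before, like MarkObjectAndPush above.
  void MarkAndPush(void* object) {
    if (!MarkWithoutPush(object)) marking_deque.push_back(object);
  }
};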

224
deps/v8/src/mark-compact.cc

@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
abort_incremental_marking_(false),
compacting_(false),
was_marked_incrementally_(false),
collect_maps_(FLAG_collect_maps),
flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
encountered_weak_maps_(NULL),
marker_(this, this) { }
encountered_weak_maps_(NULL) { }
#ifdef DEBUG
@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
if (FLAG_collect_maps) ClearNonLiveTransitions();
if (collect_maps_) ClearNonLiveTransitions();
ClearWeakMaps();
@ -294,7 +294,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
if (!FLAG_collect_maps) ReattachInitialMaps();
if (!collect_maps_) ReattachInitialMaps();
Finish();
@ -658,6 +658,11 @@ void MarkCompactCollector::AbortCompaction() {
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
// Disable collection of maps if incremental marking is enabled.
// Map collection algorithm relies on a special map transition tree traversal
// order which is not implemented for incremental marking.
collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
// Monomorphic ICs are preserved when possible, but need to be flushed
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
@ -675,6 +680,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
if (collect_maps_) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If GDBJIT interface is active disable compaction.
@ -1180,7 +1186,16 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Heap* heap = map->GetHeap();
Code* code = reinterpret_cast<Code*>(object);
if (FLAG_cleanup_code_caches_at_gc) {
code->ClearTypeFeedbackCells(heap);
Object* raw_info = code->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackCells* type_feedback_cells =
TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
ASSERT(type_feedback_cells->AstId(i)->IsSmi());
JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
}
}
}
code->CodeIterateBody<StaticMarkingVisitor>(heap);
}
@ -1793,11 +1808,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak. Only maps for subclasses
// of JSReceiver can have transitions.
// in a special way to make transition links weak.
// Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
MarkMapContents(map);
} else {
marking_deque_.PushBlack(map);
}
@ -1807,86 +1822,79 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
}
// Force instantiation of template instances.
template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
template <class T>
void Marker<T>::MarkMapContents(Map* map) {
void MarkCompactCollector::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
// transitions in ClearNonLiveTransitions.
Object** proto_trans_slot =
HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
if (prototype_transitions->IsFixedArray()) {
mark_compact_collector()->RecordSlot(proto_trans_slot,
proto_trans_slot,
prototype_transitions);
FixedArray* prototype_transitions = map->prototype_transitions();
MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
if (!mark.Get()) {
mark.Set();
MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
prototype_transitions->Size());
}
}
// Make sure that the back pointer stored either in the map itself or inside
// its prototype transitions array is marked. Treat pointers in the descriptor
// array as weak and also mark that array to prevent visiting it later.
base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
Object** descriptor_array_slot =
Object** raw_descriptor_array_slot =
HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
Object* descriptor_array = *descriptor_array_slot;
if (!descriptor_array->IsSmi()) {
MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
}
// Mark the Object* fields of the Map. Since the descriptor array has been
// marked already, it is fine that one of these fields contains a pointer
// to it. But make sure to skip back pointer and prototype transitions.
STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
Object** start_slot = HeapObject::RawField(
map, Map::kPointerFieldsBeginOffset);
Object** end_slot = HeapObject::RawField(
map, Map::kPrototypeTransitionsOrBackPointerOffset);
for (Object** slot = start_slot; slot < end_slot; slot++) {
Object* obj = *slot;
if (!obj->NonFailureIsHeapObject()) continue;
mark_compact_collector()->RecordSlot(start_slot, slot, obj);
base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
Object* raw_descriptor_array = *raw_descriptor_array_slot;
if (!raw_descriptor_array->IsSmi()) {
MarkDescriptorArray(
reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
}
// Mark the Object* fields of the Map.
// Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it.
Object** start_slot = HeapObject::RawField(map,
Map::kPointerFieldsBeginOffset);
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
}
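The comments above hinge on the difference between marking an object and pushing it (its slots get scanned later) versus marking it without pushing (its slots are never scanned, so anything reachable only through it stays collectable, i.e. the references are weak). A minimal sketch of that distinction, with invented types rather than V8's:

#include <vector>

// Toy object: a mark bit plus outgoing references.
struct Obj {
  bool marked = false;
  std::vector<Obj*> slots;
};

// Strong marking: the object is pushed, so its slots are visited later.
void MarkObjectAndPush(Obj* o, std::vector<Obj*>& marking_deque) {
  if (o->marked) return;
  o->marked = true;
  marking_deque.push_back(o);
}

// Weak-style marking: the object itself survives, but its slots are never
// scanned, so objects reachable only through it remain unmarked.
void MarkObjectWithoutPush(Obj* o) {
  o->marked = true;
}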
void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
int offset) {
Object** slot = HeapObject::RawField(accessors, offset);
HeapObject* accessor = HeapObject::cast(*slot);
if (accessor->IsMap()) return;
RecordSlot(slot, slot, accessor);
MarkObjectAndPush(accessor);
}
template <class T>
void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
if (descriptors_mark.Get()) return;
// Empty descriptor array is marked as a root before any maps are marked.
ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
ASSERT(descriptors != heap()->empty_descriptor_array());
SetMark(descriptors, descriptors_mark);
// The DescriptorArray contains a pointer to its contents array, but the
// contents array will be marked black and hence not be visited again.
if (!base_marker()->MarkObjectAndPush(descriptors)) return;
FixedArray* contents = FixedArray::cast(
FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
ASSERT(!IsMarked(contents));
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
base_marker()->MarkObjectWithoutPush(contents);
// Contents contains (value, details) pairs. If the descriptor contains a
// transition (value is a Map), we don't mark the value as live. It might
// be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
MarkBit contents_mark = Marking::MarkBitFrom(contents);
SetMark(contents, contents_mark);
// Contents contains (value, details) pairs. If the details say that the type
// of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
// EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
// live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
// CONSTANT_TRANSITION is the value an Object* (a Map*).
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
if (!(*slot)->IsHeapObject()) continue;
HeapObject* value = HeapObject::cast(*slot);
mark_compact_collector()->RecordSlot(slot, slot, *slot);
RecordSlot(slot, slot, *slot);
switch (details.type()) {
case NORMAL:
@ -1894,22 +1902,21 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
base_marker()->MarkObjectAndPush(value);
MarkObjectAndPush(value);
break;
case CALLBACKS:
if (!value->IsAccessorPair()) {
base_marker()->MarkObjectAndPush(value);
} else if (base_marker()->MarkObjectWithoutPush(value)) {
AccessorPair* accessors = AccessorPair::cast(value);
MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
MarkObjectAndPush(value);
} else if (!MarkObjectWithoutPush(value)) {
MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
}
break;
case ELEMENTS_TRANSITION:
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
if (value->IsFixedArray()) MarkObjectWithoutPush(value);
break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
@ -1917,16 +1924,26 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
break;
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
marking_deque_.PushBlack(descriptors);
}
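As the comments in MarkDescriptorArray describe, the contents array stores (value, details) pairs and only non-transition values are kept strongly alive. A hedged sketch of that rule with invented types (the real PropertyDetails encoding is richer):

// Simplified property types; the real enum has more members.
enum SketchPropertyType {
  NORMAL,
  CALLBACKS,
  MAP_TRANSITION,
  CONSTANT_TRANSITION,
  NULL_DESCRIPTOR
};

struct SketchDescriptor {
  void* value;               // even index in the contents array
  SketchPropertyType type;   // from the details stored at the odd index
};

// Transitions hold Map pointers that may later be cleared, so they are
// treated as weak and the value is not marked through them.
bool MarksValueStrongly(const SketchDescriptor& d) {
  return d.type != MAP_TRANSITION &&
         d.type != CONSTANT_TRANSITION &&
         d.type != NULL_DESCRIPTOR;
}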
template <class T>
void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
Object** slot = HeapObject::RawField(accessors, offset);
HeapObject* accessor = HeapObject::cast(*slot);
if (accessor->IsMap()) return;
mark_compact_collector()->RecordSlot(slot, slot, accessor);
base_marker()->MarkObjectAndPush(accessor);
void MarkCompactCollector::CreateBackPointers() {
HeapObjectIterator iterator(heap()->map_space());
for (HeapObject* next_object = iterator.Next();
next_object != NULL; next_object = iterator.Next()) {
if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
Map* map = Map::cast(next_object);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
map->CreateBackPointers();
} else {
ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
}
}
}
}
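CreateBackPointers relies on the property stated in the header comment: every map in a transition tree shares the same prototype, so the prototype slot can temporarily double as a back pointer and be restored later. A small illustrative sketch (invented types, not V8's Map layout):

// One slot serves two purposes during GC: normally the prototype, but
// temporarily the parent map in the transition tree.
struct SketchMap {
  SketchMap* prototype_or_back_pointer;
};

// Before marking: borrow the slot to remember the transition parent.
void InstallBackPointer(SketchMap* child, SketchMap* parent) {
  child->prototype_or_back_pointer = parent;
}

// After marking: every map gets the shared prototype written back.
void RestorePrototype(SketchMap* map, SketchMap* real_prototype) {
  map->prototype_or_back_pointer = real_prototype;
}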
@ -2453,8 +2470,15 @@ void MarkCompactCollector::ReattachInitialMaps() {
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
// is carried out only on maps of JSObjects and related subtypes.
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
// dropping the back pointers temporarily stored in the prototype field.
// Setting the prototype field requires following the linked list of
// back pointers, reversing them all at once. This allows us to find
// those maps with map transitions that need to be nulled, and only
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
for (HeapObject* obj = map_iterator.Next();
obj != NULL; obj = map_iterator.Next()) {
Map* map = reinterpret_cast<Map*>(obj);
@ -2530,16 +2554,36 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
MarkBit map_mark) {
Object* potential_parent = map->GetBackPointer();
if (!potential_parent->IsMap()) return;
Map* parent = Map::cast(potential_parent);
// Follow the chain of back pointers to find the prototype.
Object* real_prototype = map;
while (real_prototype->IsMap()) {
real_prototype = Map::cast(real_prototype)->prototype();
ASSERT(real_prototype->IsHeapObject());
}
// Follow the back pointer, check whether we are dealing with a map transition
// from a live map to a dead path, and if so clear the transitions of the parent.
// Follow back pointers, setting them to prototype, clearing map transitions
// when necessary.
Map* current = map;
bool current_is_alive = map_mark.Get();
bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
if (!current_is_alive && parent_is_alive) {
parent->ClearNonLiveTransitions(heap());
bool on_dead_path = !current_is_alive;
while (current->IsMap()) {
Object* next = current->prototype();
// There should never be a dead map above a live map.
ASSERT(on_dead_path || current_is_alive);
// A live map above a dead map indicates a dead transition. This test will
// always be false on the first iteration.
if (on_dead_path && current_is_alive) {
on_dead_path = false;
current->ClearNonLiveTransitions(heap(), real_prototype);
}
Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
*slot = real_prototype;
if (current_is_alive) RecordSlot(slot, slot, real_prototype);
current = reinterpret_cast<Map*>(next);
current_is_alive = Marking::MarkBitFrom(current).Get();
}
}
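The loop above walks the back-pointer chain from a map toward its transition parents; once it crosses from a dead map to a live one it clears that live parent's transitions, and every visited map gets its prototype slot restored. A condensed sketch of the walk (invented types, no slot recording):

struct ChainMap {
  ChainMap* parent;   // back pointer installed during marking, may be null
  bool alive;         // result of the mark phase
  void ClearNonLiveTransitions() { /* drop transition edges to dead maps */ }
};

void ProcessBackPointerChain(ChainMap* map) {
  ChainMap* current = map;
  bool on_dead_path = !map->alive;
  while (current != nullptr) {
    // A live map reached from a dead path owns a now-dead transition.
    // This is always false on the first iteration, as the comment notes.
    if (on_dead_path && current->alive) {
      on_dead_path = false;
      current->ClearNonLiveTransitions();
    }
    current = current->parent;
  }
}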
@ -2738,9 +2782,7 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
// We have to zap this pointer, because the store buffer may overflow later,
// and then we have to scan the entire heap and we don't want to find
// spurious newspace pointers in the old space.
// TODO(mstarzinger): This was changed to a sentinel value to track down
// rare crashes, change it back to Smi::FromInt(0) later.
*p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
*p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
}
}
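The zapping above replaces a stale slot with a smi so that a later whole-heap scan (triggered by store-buffer overflow) cannot misread it as a new-space pointer. A tiny sketch of the idea, assuming the usual convention that smis are never heap pointers:

#include <cstdint>

// Overwrite a dead slot with smi zero: if the slot is ever scanned again,
// the value is recognisably not a heap pointer.
void ZapSlot(intptr_t* slot) {
  *slot = 0;  // Smi::FromInt(0) in V8 terms
}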
@ -3796,7 +3838,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
bool lazy_sweeping_active = false;
bool unused_page_present = false;
intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
intptr_t old_space_size = heap()->PromotedSpaceSize();
intptr_t space_left =
Min(heap()->OldGenPromotionLimit(old_space_size),
heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;

64
deps/v8/src/mark-compact.h

@ -42,7 +42,6 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
@ -167,6 +166,7 @@ class Marking {
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
public:
MarkingDeque()
@ -383,34 +383,6 @@ class SlotsBuffer {
};
// -------------------------------------------------------------------------
// Marker shared between incremental and non-incremental marking
template<class BaseMarker> class Marker {
public:
Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
: base_marker_(base_marker),
mark_compact_collector_(mark_compact_collector) {}
// Mark pointers in a Map and its DescriptorArray together, possibly
// treating transitions or back pointers weak.
void MarkMapContents(Map* map);
void MarkDescriptorArray(DescriptorArray* descriptors);
void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
private:
BaseMarker* base_marker() {
return base_marker_;
}
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_;
}
BaseMarker* base_marker_;
MarkCompactCollector* mark_compact_collector_;
};
// Defined in isolate.h.
class ThreadLocalTop;
@ -612,6 +584,8 @@ class MarkCompactCollector {
bool was_marked_incrementally_;
bool collect_maps_;
bool flush_monomorphic_ics_;
// A pointer to the current stack-allocated GC tracer object during a full
@ -634,13 +608,12 @@ class MarkCompactCollector {
//
// After: Live objects are marked and non-live objects are unmarked.
friend class RootMarkingVisitor;
friend class MarkingVisitor;
friend class StaticMarkingVisitor;
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class Marker<IncrementalMarking>;
friend class Marker<MarkCompactCollector>;
// Mark non-optimize code for functions inlined into the given optimized
// code. This will prevent it from being flushed.
@ -658,25 +631,29 @@ class MarkCompactCollector {
void AfterMarking();
// Marks the object black and pushes it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for non-incremental marking only.
INLINE(bool MarkObjectAndPush(HeapObject* obj));
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
// This is for non-incremental marking.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for non-incremental marking only.
INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
INLINE(bool MarkObjectWithoutPush(HeapObject* object));
INLINE(void MarkObjectAndPush(HeapObject* value));
// Marks the object black assuming that it is not yet marked.
// This is for non-incremental marking only.
// Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
// in ClearNonLiveTransitions(). All JSObject maps
// connected by map transitions have the same prototype object, which
// is why we can use this field temporarily for back pointers.
void CreateBackPointers();
// Mark a Map and its DescriptorArray together, skipping transitions.
void MarkMapContents(Map* map);
void MarkAccessorPairSlot(HeapObject* accessors, int offset);
void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
@ -779,7 +756,6 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
Marker<MarkCompactCollector> marker_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;

230
deps/v8/src/messages.js

@ -61,21 +61,18 @@ function FormatString(format, message) {
// To check if something is a native error we need to check the
// concrete native error types. It is not sufficient to use instanceof
// since it is possible to create an object that has Error.prototype on
// its prototype chain. This is the case for DOMException for example.
// concrete native error types. It is not enough to check "obj
// instanceof $Error" because user code can replace
// NativeError.prototype.__proto__. User code cannot replace
// NativeError.prototype though and therefore this is a safe test.
function IsNativeErrorObject(obj) {
switch (%_ClassOf(obj)) {
case 'Error':
case 'EvalError':
case 'RangeError':
case 'ReferenceError':
case 'SyntaxError':
case 'TypeError':
case 'URIError':
return true;
}
return false;
return (obj instanceof $Error) ||
(obj instanceof $EvalError) ||
(obj instanceof $RangeError) ||
(obj instanceof $ReferenceError) ||
(obj instanceof $SyntaxError) ||
(obj instanceof $TypeError) ||
(obj instanceof $URIError);
}
@ -748,7 +745,7 @@ function GetPositionInLine(message) {
function GetStackTraceLine(recv, fun, pos, isGlobal) {
return new CallSite(recv, fun, pos).toString();
return FormatSourcePosition(new CallSite(recv, fun, pos));
}
// ----------------------------------------------------------------------------
@ -788,7 +785,15 @@ function CallSiteGetThis() {
}
function CallSiteGetTypeName() {
return GetTypeName(this, false);
var constructor = this.receiver.constructor;
if (!constructor) {
return %_CallFunction(this.receiver, ObjectToString);
}
var constructorName = constructor.name;
if (!constructorName) {
return %_CallFunction(this.receiver, ObjectToString);
}
return constructorName;
}
function CallSiteIsToplevel() {
@ -822,10 +827,8 @@ function CallSiteGetFunctionName() {
var name = this.fun.name;
if (name) {
return name;
}
name = %FunctionGetInferredName(this.fun);
if (name) {
return name;
} else {
return %FunctionGetInferredName(this.fun);
}
// Maybe this is an evaluation?
var script = %FunctionGetScript(this.fun);
@ -916,69 +919,6 @@ function CallSiteIsConstructor() {
return this.fun === constructor;
}
function CallSiteToString() {
var fileName;
var fileLocation = "";
if (this.isNative()) {
fileLocation = "native";
} else if (this.isEval()) {
fileName = this.getScriptNameOrSourceURL();
if (!fileName) {
fileLocation = this.getEvalOrigin();
}
} else {
fileName = this.getFileName();
}
if (fileName) {
fileLocation += fileName;
var lineNumber = this.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
var columnNumber = this.getColumnNumber();
if (columnNumber) {
fileLocation += ":" + columnNumber;
}
}
}
if (!fileLocation) {
fileLocation = "unknown source";
}
var line = "";
var functionName = this.getFunctionName();
var addSuffix = true;
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
var typeName = GetTypeName(this, true);
var methodName = this.getMethodName();
if (functionName) {
if (typeName && functionName.indexOf(typeName) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName && functionName.lastIndexOf("." + methodName) !=
functionName.length - methodName.length - 1) {
line += " [as " + methodName + "]";
}
} else {
line += typeName + "." + (methodName || "<anonymous>");
}
} else if (isConstructor) {
line += "new " + (functionName || "<anonymous>");
} else if (functionName) {
line += functionName;
} else {
line += fileLocation;
addSuffix = false;
}
if (addSuffix) {
line += " (" + fileLocation + ")";
}
return line;
}
SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getThis", CallSiteGetThis,
"getTypeName", CallSiteGetTypeName,
@ -994,8 +934,7 @@ SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getColumnNumber", CallSiteGetColumnNumber,
"isNative", CallSiteIsNative,
"getPosition", CallSiteGetPosition,
"isConstructor", CallSiteIsConstructor,
"toString", CallSiteToString
"isConstructor", CallSiteIsConstructor
));
@ -1037,6 +976,65 @@ function FormatEvalOrigin(script) {
return eval_origin;
}
function FormatSourcePosition(frame) {
var fileName;
var fileLocation = "";
if (frame.isNative()) {
fileLocation = "native";
} else if (frame.isEval()) {
fileName = frame.getScriptNameOrSourceURL();
if (!fileName) {
fileLocation = frame.getEvalOrigin();
}
} else {
fileName = frame.getFileName();
}
if (fileName) {
fileLocation += fileName;
var lineNumber = frame.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
var columnNumber = frame.getColumnNumber();
if (columnNumber) {
fileLocation += ":" + columnNumber;
}
}
}
if (!fileLocation) {
fileLocation = "unknown source";
}
var line = "";
var functionName = frame.getFunction().name;
var addPrefix = true;
var isConstructor = frame.isConstructor();
var isMethodCall = !(frame.isToplevel() || isConstructor);
if (isMethodCall) {
var methodName = frame.getMethodName();
line += frame.getTypeName() + ".";
if (functionName) {
line += functionName;
if (methodName && (methodName != functionName)) {
line += " [as " + methodName + "]";
}
} else {
line += methodName || "<anonymous>";
}
} else if (isConstructor) {
line += "new " + (functionName || "<anonymous>");
} else if (functionName) {
line += functionName;
} else {
line += fileLocation;
addPrefix = false;
}
if (addPrefix) {
line += " (" + fileLocation + ")";
}
return line;
}
function FormatStackTrace(error, frames) {
var lines = [];
try {
@ -1052,7 +1050,7 @@ function FormatStackTrace(error, frames) {
var frame = frames[i];
var line;
try {
line = frame.toString();
line = FormatSourcePosition(frame);
} catch (e) {
try {
line = "<error: " + e + ">";
@ -1083,19 +1081,6 @@ function FormatRawStackTrace(error, raw_stack) {
}
}
function GetTypeName(obj, requireConstructor) {
var constructor = obj.receiver.constructor;
if (!constructor) {
return requireConstructor ? null :
%_CallFunction(obj.receiver, ObjectToString);
}
var constructorName = constructor.name;
if (!constructorName) {
return requireConstructor ? null :
%_CallFunction(obj.receiver, ObjectToString);
}
return constructorName;
}
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;
@ -1140,7 +1125,13 @@ function SetUpError() {
}
%FunctionSetInstanceClassName(f, 'Error');
%SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
%SetProperty(f.prototype, "name", name, DONT_ENUM);
// The name property on the prototype of error objects is not
// specified as being read-only and dont-delete. However, allowing
// it to be overwritten permits leaks of error objects between script
// blocks in the same context in a browser setting. Therefore we fix
// the name.
%SetProperty(f.prototype, "name", name,
DONT_ENUM | DONT_DELETE | READ_ONLY) ;
%SetCode(f, function(m) {
if (%_IsConstructCall()) {
// Define all the expected properties directly on the error
@ -1156,8 +1147,10 @@ function SetUpError() {
return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
});
} else if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
%IgnoreAttributesAndSetProperty(this,
'message',
ToString(m),
DONT_ENUM);
}
captureStackTrace(this, f);
} else {
@ -1187,41 +1180,16 @@ $Error.captureStackTrace = captureStackTrace;
var visited_errors = new InternalArray();
var cyclic_error_marker = new $Object();
function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
// Climb the prototype chain until we find the holder.
while (error && !%HasLocalProperty(error, name)) {
error = error.__proto__;
}
if (error === null) return void 0;
if (!IS_OBJECT(error)) return error[name];
// If the property is an accessor on one of the predefined errors that can be
// generated statically by the compiler, don't touch it. This is to address
// http://code.google.com/p/chromium/issues/detail?id=69187
var desc = %GetOwnProperty(error, name);
if (desc && desc[IS_ACCESSOR_INDEX]) {
var isName = name === "name";
if (error === $ReferenceError.prototype)
return isName ? "ReferenceError" : void 0;
if (error === $SyntaxError.prototype)
return isName ? "SyntaxError" : void 0;
if (error === $TypeError.prototype)
return isName ? "TypeError" : void 0;
}
// Otherwise, read normally.
return error[name];
}
function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
var type = error.type;
var name = error.name;
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
var message = error.message;
var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
if (type && !hasMessage) {
var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
message = FormatMessage(%NewMessageObject(type, args));
message = FormatMessage(%NewMessageObject(type, error.arguments));
}
message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
if (name === "") return message;

9
deps/v8/src/mips/builtins-mips.cc

@ -118,7 +118,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -214,8 +214,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ Assert(
@ -450,10 +449,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ Branch(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// a3: JSArray
__ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
a2,
t5,

38
deps/v8/src/mips/code-stubs-mips.cc

@ -5043,7 +5043,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
const int kRegExpExecuteArguments = 8;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
@ -5054,33 +5054,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// allocating space for the C argument slots, we don't need to calculate
// that into the argument positions on the stack. This is how the stack will
// look (sp meaning the value of sp at this moment):
// [sp + 5] - Argument 9
// [sp + 4] - Argument 8
// [sp + 3] - Argument 7
// [sp + 2] - Argument 6
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
// Argument 9: Pass current isolate address.
// Argument 8: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
__ li(a0, Operand(ExternalReference::isolate_address()));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
__ sw(a0, MemOperand(sp, 4 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
// Argument 7: Indicate that this is a direct call from JavaScript.
__ li(a0, Operand(1));
__ sw(a0, MemOperand(sp, 4 * kPointerSize));
__ sw(a0, MemOperand(sp, 3 * kPointerSize));
// Argument 7: Start (high end) of backtracking stack memory area.
// Argument 6: Start (high end) of backtracking stack memory area.
__ li(a0, Operand(address_of_regexp_stack_memory_address));
__ lw(a0, MemOperand(a0, 0));
__ li(a2, Operand(address_of_regexp_stack_memory_size));
__ lw(a2, MemOperand(a2, 0));
__ addu(a0, a0, a2);
__ sw(a0, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(a0, zero_reg);
__ sw(a0, MemOperand(sp, 2 * kPointerSize));
// Argument 5: static offsets vector buffer.
@ -5131,9 +5125,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ Branch(&success, eq, v0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
Label failure;
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
@ -7370,8 +7362,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
@ -7637,9 +7629,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(a2, t1, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
__ JumpIfSmi(a0, &smi_element);
__ CheckFastSmiElements(a2, t1, &fast_elements);
__ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
// Storing into the array literal requires an elements transition. Call into
// the runtime.
@ -7651,7 +7643,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(t1, t0);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@ -7664,8 +7656,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
__ bind(&smi_element);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@ -7674,7 +7666,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,

4
deps/v8/src/mips/codegen-mips.cc

@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value

4
deps/v8/src/mips/debug-mips.cc

@ -1,4 +1,4 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -116,8 +116,6 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
Assembler::kDebugBreakSlotInstructions);
}
const bool Debug::FramePaddingLayout::kIsSupported = false;
#define __ ACCESS_MASM(masm)

8
deps/v8/src/mips/full-codegen-mips.cc

@ -1711,8 +1711,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements =
IsFastObjectElementsKind(constant_elements_kind);
bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1734,7 +1733,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@ -1763,7 +1763,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (IsFastObjectElementsKind(constant_elements_kind)) {
if (constant_elements_kind == FAST_ELEMENTS) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ lw(t2, MemOperand(sp)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));

56
deps/v8/src/mips/ic-mips.cc

@ -1347,35 +1347,34 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&non_double_value, ne, t0, Operand(at));
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
t0,
&slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
t0,
&slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are double, but value is an Object that's not a HeapNumber. Make
// sure that the receiver is an Array with Object elements and transition the
// array from double elements to Object elements.
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
@ -1472,7 +1471,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
@ -1689,12 +1688,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
void PatchInlinedSmiCode(Address address) {
Address andi_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@ -1728,30 +1727,33 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
// This is patching a conditional "jump if not smi/jump if smi" site.
// Enabling by changing from
ASSERT(Assembler::IsAndImmediate(instr_at_patch));
ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::IsBeq(branch_instr)) {
// This is patching a "jump if not smi" site to be active.
// Changing:
// andi at, rx, 0
// Branch <target>, eq, at, Operand(zero_reg)
// to:
// andi at, rx, #kSmiTagMask
// Branch <target>, ne, at, Operand(zero_reg)
// and vice-versa to be disabled again.
CodePatcher patcher(patch_address, 2);
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
if (check == ENABLE_INLINED_SMI_CHECK) {
ASSERT(Assembler::IsAndImmediate(instr_at_patch));
ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
patcher.masm()->andi(at, reg, kSmiTagMask);
} else {
ASSERT(check == DISABLE_INLINED_SMI_CHECK);
ASSERT(Assembler::IsAndImmediate(instr_at_patch));
patcher.masm()->andi(at, reg, 0);
}
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::IsBeq(branch_instr)) {
patcher.ChangeBranchCondition(ne);
} else {
ASSERT(Assembler::IsBne(branch_instr));
// This is patching a "jump if smi" site to be active.
// Changing:
// andi at, rx, 0
// Branch <target>, ne, at, Operand(zero_reg)
// to:
// andi at, rx, #kSmiTagMask
// Branch <target>, eq, at, Operand(zero_reg)
CodePatcher patcher(patch_address, 2);
Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
patcher.masm()->andi(at, reg, kSmiTagMask);
patcher.ChangeBranchCondition(eq);
}
}
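The patching above toggles an inlined smi check by rewriting the andi mask and the branch condition. What the enabled form computes is the standard smi predicate: smis carry a zero low tag bit, so masking with kSmiTagMask yields zero exactly for smis, while masking with 0 makes the branch insensitive to the value. A hedged sketch of that predicate (the tag-mask value is assumed for illustration):

#include <cstdint>

// Assumption for illustration: a 1-bit smi tag in the lowest bit.
constexpr intptr_t kSmiTagMaskSketch = 1;

// The enabled patch ("andi at, rx, #kSmiTagMask" plus a conditional branch)
// effectively evaluates this predicate.
inline bool IsSmiLike(intptr_t value) {
  return (value & kSmiTagMaskSketch) == 0;
}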

134
deps/v8/src/mips/lithium-codegen-mips.cc

@ -2343,37 +2343,40 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
if (map_count == 0 && !need_generic) {
DeoptimizeIf(al, instr->environment());
return;
}
Handle<String> name = instr->hydrogen()->name();
if (map_count == 0) {
ASSERT(instr->hydrogen()->need_generic());
__ li(a2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
Label done;
__ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
for (int i = 0; i < map_count; ++i) {
bool last = (i == map_count - 1);
for (int i = 0; i < map_count - 1; ++i) {
Handle<Map> map = instr->hydrogen()->types()->at(i);
if (last && !need_generic) {
DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
EmitLoadFieldOrConstantFunction(result, object, map, name);
} else {
Label next;
__ Branch(&next, ne, scratch, Operand(map));
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ Branch(&done);
__ bind(&next);
}
}
if (need_generic) {
Handle<Map> map = instr->hydrogen()->types()->last();
if (instr->hydrogen()->need_generic()) {
Label generic;
__ Branch(&generic, ne, scratch, Operand(map));
EmitLoadFieldOrConstantFunction(result, object, map, name);
__ Branch(&done);
__ bind(&generic);
__ li(a2, Operand(name));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
EmitLoadFieldOrConstantFunction(result, object, map, name);
}
__ bind(&done);
}
}
@ -2448,10 +2451,8 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ Ext(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ Branch(&fail, lt, scratch,
Operand(GetInitialFastElementsKind()));
__ Branch(&done, le, scratch,
Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ Branch(&done, eq, scratch,
Operand(FAST_ELEMENTS));
__ Branch(&fail, lt, scratch,
Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ Branch(&done, le, scratch,
@ -2504,9 +2505,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
__ addu(scratch, elements, scratch);
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ lw(result, FieldMemOperand(scratch, offset));
__ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2537,21 +2536,17 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
if (key_is_constant) {
__ Addu(elements, elements,
Operand(((constant_key + instr->additional_index()) << shift_size) +
__ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, Operand(scratch));
__ Addu(elements, elements,
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
__ ldc1(result, MemOperand(elements));
}
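Both variants of the address computation above reduce to the same arithmetic: untag the elements pointer, add the FixedDoubleArray header, and add the key scaled by the element size (1 << shift_size). A small sketch with placeholder constants (the real values come from FixedDoubleArray::kHeaderSize and kHeapObjectTag):

#include <cstdint>

constexpr intptr_t kSketchHeaderSize    = 8;  // placeholder
constexpr intptr_t kSketchHeapObjectTag = 1;  // placeholder

intptr_t DoubleElementAddress(intptr_t tagged_elements, intptr_t key,
                              int shift_size) {
  return (tagged_elements - kSketchHeapObjectTag) + kSketchHeaderSize +
         (key << shift_size);
}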
@ -2573,41 +2568,32 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key << shift_size);
__ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ lwc1(result, MemOperand(scratch0(), additional_offset));
__ lwc1(result, MemOperand(scratch0()));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ ldc1(result, MemOperand(scratch0(), additional_offset));
__ ldc1(result, MemOperand(scratch0()));
}
} else {
Register result = ToRegister(instr->result());
Register scratch = scratch0();
if (instr->additional_index() != 0 && !key_is_constant) {
__ Addu(scratch, key, instr->additional_index());
}
MemOperand mem_operand(zero_reg);
if (key_is_constant) {
mem_operand =
MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset);
mem_operand = MemOperand(external_pointer,
constant_key * (1 << shift_size));
} else {
if (instr->additional_index() == 0) {
__ sll(scratch, key, shift_size);
} else {
__ sll(scratch, scratch, shift_size);
}
__ Addu(scratch, scratch, external_pointer);
mem_operand = MemOperand(scratch);
}
@ -2640,10 +2626,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3525,17 +3508,11 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ sw(value, FieldMemOperand(elements, offset));
} else {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
if (instr->additional_index() != 0) {
__ Addu(scratch,
scratch,
instr->additional_index() << kPointerSizeLog2);
}
__ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@ -3578,7 +3555,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
__ Addu(scratch, elements, Operand((constant_key << shift_size) +
__ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
@ -3599,7 +3576,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
__ bind(&not_nan);
__ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
__ sdc1(value, MemOperand(scratch));
}
@ -3620,13 +3597,12 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key << shift_size);
__ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@ -3634,27 +3610,19 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
__ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
__ swc1(double_scratch0(), MemOperand(scratch0()));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ sdc1(value, MemOperand(scratch0(), additional_offset));
__ sdc1(value, MemOperand(scratch0()));
}
} else {
Register value(ToRegister(instr->value()));
Register scratch = scratch0();
if (instr->additional_index() != 0 && !key_is_constant) {
__ Addu(scratch, key, instr->additional_index());
}
MemOperand mem_operand(zero_reg);
Register scratch = scratch0();
if (key_is_constant) {
mem_operand = MemOperand(external_pointer,
((constant_key + instr->additional_index())
<< shift_size));
constant_key * (1 << shift_size));
} else {
if (instr->additional_index() == 0) {
__ sll(scratch, key, shift_size);
} else {
__ sll(scratch, scratch, shift_size);
}
__ Addu(scratch, scratch, external_pointer);
mem_operand = MemOperand(scratch);
}
@ -3676,10 +3644,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3717,21 +3682,20 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
__ li(new_map_reg, Operand(to_map));
if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
@ -4486,9 +4450,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@ -4641,11 +4604,10 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));

5
deps/v8/src/mips/lithium-mips.cc

@ -2023,9 +2023,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =

6
deps/v8/src/mips/lithium-mips.h

@ -1201,7 +1201,6 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1218,7 +1217,6 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1239,7 +1237,6 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1708,7 +1705,6 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1731,7 +1727,6 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -1776,7 +1771,6 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};

81
deps/v8/src/mips/macro-assembler-mips.cc

@ -3341,39 +3341,33 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue));
Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, ls, scratch,
Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue));
Operand(Map::kMaximumBitField2FastElementValue));
}
void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
}
@ -3475,17 +3469,22 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
Operand right = Operand(map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind, NULL);
if (!current_map) break;
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
Branch(early_success, eq, scratch, right);
right = Operand(Handle<Map>(current_map));
right = Operand(Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
Branch(early_success, eq, scratch, right);
right = Operand(Handle<Map>(transitioned_double_map));
}
}
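The map comparison above also accepts maps reachable through elements-kind transitions. In the pre-rollback naming used here, the fast-kind ordering is FAST_SMI_ONLY_ELEMENTS to FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS, with SMI-only also allowed to go straight to FAST_ELEMENTS (the same three transitions handled in DoTransitionElementsKind above). A compact sketch of that ordering:

// Toy model of the fast elements-kind transition order used above.
enum class FastKind { SmiOnly, Double, Object };

bool CanTransition(FastKind from, FastKind to) {
  switch (from) {
    case FastKind::SmiOnly:
      return to == FastKind::Double || to == FastKind::Object;
    case FastKind::Double:
      return to == FastKind::Object;
    case FastKind::Object:
      return false;  // already the most general fast kind
  }
  return false;
}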
@ -4444,37 +4443,27 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
lw(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
Branch(no_map_match, ne, map_in_out, Operand(scratch));
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
Branch(no_map_match, ne, map_in_out, Operand(at));
// Use the transitioned cached map.
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
lw(map_in_out, FieldMemOperand(scratch, offset));
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
lw(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
@ -5389,7 +5378,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap) {
masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.

5
deps/v8/src/mips/macro-assembler-mips.h

@ -819,8 +819,7 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out,
bool can_have_holes);
Register map_out);
void LoadGlobalFunction(int index, Register function);
@ -962,7 +961,7 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);

136
deps/v8/src/mips/regexp-macro-assembler-mips.cc

@ -43,31 +43,27 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - t7 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
* - t1 : Pointer to current code object (Code*) including heap object tag.
* - t2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - t3 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
* - t4 : Points to tip of backtrack stack
* - t4 : points to tip of backtrack stack
* - t5 : Unused.
* - t6 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - sp : Points to tip of C stack.
* - sp : points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
*
* - fp[64] Isolate* isolate (address of the current isolate)
* - fp[60] direct_call (if 1, direct call from JavaScript code,
* - fp[56] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[56] stack_area_base (High end of the memory area to use as
* - fp[52] stack_area_base (High end of the memory area to use as
* backtracking stack).
* - fp[52] capture array size (may fit multiple sets of matches)
* - fp[48] int* capture_array (int[num_saved_registers_], for output).
* - fp[44] secondary link/return address used by native call.
* --- sp when called ---
@@ -75,17 +71,16 @@ namespace internal {
* - fp[36] old frame pointer (r11).
* - fp[0..32] backup of registers s0..s7.
* --- frame pointer ----
* - fp[-4] end of input (address of end of string).
* - fp[-8] start of input (address of first character in string).
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
* - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-28] At start (if 1, we are starting at the start of the
* - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-32] register 0 (Only positions must be stored in the first
* - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
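
Every fp[...] slot in the table above is addressed through a frame_pointer()-relative MemOperand using the k* constants from regexp-macro-assembler-mips.h (that header's hunk appears later in this diff). For example, pairing the code pattern with the restored offsets listed above (illustrative pairing only):

    __ lw(a0, MemOperand(frame_pointer(), kStartIndex));          // fp[-12] start index
    __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));  // fp[-20] "position - 1"
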
@@ -206,8 +201,8 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ lw(a0, MemOperand(frame_pointer(), kStartIndex));
BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
@@ -219,8 +214,8 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ lw(a0, MemOperand(frame_pointer(), kStartIndex));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
__ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
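
Both CheckAtStart and CheckNotAtStart test the same two-part predicate; the rollback only changes where the first half comes from (the kAtStart frame slot instead of comparing kStartIndex against zero). A small illustrative helper with hypothetical parameter names, not V8 code:

    // True when the match began at the string start and has not advanced.
    // current_offset is the (negative) byte offset from the end of input.
    inline bool AtStartSketch(int at_start_flag, const char* input_start,
                              const char* input_end, int current_offset) {
      return at_start_flag != 0 && input_end + current_offset == input_start;
    }
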
@@ -645,7 +640,6 @@ void RegExpMacroAssemblerMIPS::Fail() {
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
Label return_v0;
if (masm_->has_exception()) {
// If the code gets corrupted due to long regular expressions and lack of
// space on trampolines, an internal exception flag is set. If this case
@@ -675,9 +669,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
__ push(a0); // Make room for "position - 1" constant (value irrelevant).
__ push(a0); // Make room for "at start" constant (value irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -696,12 +689,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ li(v0, Operand(EXCEPTION));
__ jmp(&return_v0);
__ jmp(&exit_label_);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(a0);
// If returned value is non-zero, we exit with the returned value as result.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
__ Branch(&exit_label_, ne, v0, Operand(zero_reg));
__ bind(&stack_ok);
// Allocate space on stack for registers.
@@ -722,25 +715,16 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// position registers.
__ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
__ li(current_character(), Operand('\n'));
__ jmp(&start_regexp);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ mov(t5, a1);
__ li(a1, Operand(1));
__ Movn(a1, zero_reg, t5);
__ sw(a1, MemOperand(frame_pointer(), kAtStart));
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1.
if (num_saved_registers_ > 8) {
// Address of register 0.
__ Addu(a1, frame_pointer(), Operand(kRegisterZero));
__ li(a2, Operand(num_saved_registers_));
@@ -750,16 +734,20 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(a1, a1, Operand(-kPointerSize));
__ Subu(a2, a2, Operand(1));
__ Branch(&init_loop, ne, a2, Operand(zero_reg));
} else {
for (int i = 0; i < num_saved_registers_; i++) {
__ sw(a0, register_location(i));
}
}
}
// Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
// Load previous char as initial value of current character register.
Label at_start;
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
__ Branch(&at_start, ne, a0, Operand(zero_reg));
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ li(current_character(), Operand('\n'));
__ jmp(&start_label_);
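
Stripped of the assembly details, the restored prologue above fills every saved capture register with "start offset - 1" (a countdown loop when more than eight registers are needed, unrolled sw instructions otherwise) and then seeds the current-character register with '\n' at the start of the string or with the previous character otherwise. A compact illustrative model with hypothetical names, not the emitted MIPS code:

    // Fill saved registers with "position - 1" so unset captures read as invalid.
    void InitSavedRegistersSketch(int* registers, int num_saved_registers,
                                  int input_start_minus_one) {
      for (int i = 0; i < num_saved_registers; i++) {
        registers[i] = input_start_minus_one;
      }
    }
    // current_character is then seeded as:
    //   at_start ? '\n' : input[start_index - 1]
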
@@ -788,10 +776,6 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ lw(a2, register_location(i));
__ lw(a3, register_location(i + 1));
if (global()) {
// Keep capture start in a4 for the zero-length check later.
__ mov(t7, a2);
}
if (mode_ == UC16) {
__ sra(a2, a2, 1);
__ Addu(a2, a2, a1);
@@ -807,52 +791,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(a0, a0, kPointerSize);
}
}
if (global()) {
// Restart matching if the regular expression is flagged as global.
__ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
__ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
__ lw(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ Addu(a0, a0, 1);
__ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ Subu(a1, a1, num_saved_registers_);
// Check whether we have enough room for another set of capture results.
__ mov(v0, a0);
__ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
__ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ Addu(a2, a2, num_saved_registers_ * kPointerSize);
__ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
__ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Special case for zero-length matches.
// t7: capture start index
// Not a zero-length match, restart.
__ Branch(
&load_char_start_regexp, ne, current_input_offset(), Operand(t7));
// Offset from the end is zero if we already reached the end.
__ Branch(&exit_label_, eq, current_input_offset(), Operand(zero_reg));
// Advance current position after a zero-length match.
__ Addu(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
__ Branch(&load_char_start_regexp);
} else {
__ li(v0, Operand(SUCCESS));
}
}
// Exit and return v0.
__ bind(&exit_label_);
if (global()) {
__ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_v0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
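
The large block deleted above is the 3.11.x global-regexp restart path: after each successful match it incremented a per-frame success counter, advanced the output area past the stored captures, and jumped back to load_char_start_regexp, stepping one character forward after a zero-length match. The restored 3.10.8.13 code simply loads SUCCESS into v0 and falls through to exit_label_. An illustrative model of the removed loop, with hypothetical callbacks standing in for a single-match run (not the restored behaviour):

    #include <functional>

    // Illustrative model of the removed global-match loop.
    int RunGlobalMatchesSketch(int num_saved_registers,
                               int remaining_output,
                               const std::function<bool()>& match_once,
                               const std::function<bool()>& match_was_zero_length,
                               const std::function<bool()>& advance_past_empty_match) {
      int successful_captures = 0;                        // kSuccessfulCaptures slot
      for (;;) {
        if (!match_once()) break;
        successful_captures++;
        remaining_output -= num_saved_registers;          // kNumOutputRegisters slot
        if (remaining_output < num_saved_registers) break;  // no room for another set
        if (match_was_zero_length() && !advance_past_empty_match()) {
          break;                                          // already at end of input
        }
      }
      return successful_captures;                         // returned in v0 at exit_label_
    }
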
@@ -878,7 +820,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ MultiPop(regexp_registers_to_retain);
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
__ Branch(&exit_label_, ne, v0, Operand(zero_reg));
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -922,7 +864,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ li(v0, Operand(EXCEPTION));
__ jmp(&return_v0);
__ jmp(&exit_label_);
}
}
@@ -1070,9 +1012,8 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
}
bool RegExpMacroAssemblerMIPS::Succeed() {
void RegExpMacroAssemblerMIPS::Succeed() {
__ jmp(&success_label_);
return global();
}
@@ -1339,9 +1280,8 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
// t7 is not being used to store the capture start index at this point.
__ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
offset = t7;
__ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
offset = a0;
}
// We assume that we cannot do unaligned loads on MIPS, so this function
// must only be used to load a single character at a time.

11
deps/v8/src/mips/regexp-macro-assembler-mips.h

@@ -115,7 +115,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual bool Succeed();
virtual void Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -141,8 +141,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kStackFrameHeader = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -154,10 +153,10 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kInputStartMinusOne = kInputString - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
static const int kRegisterZero = kAtStart - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
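
With kPointerSize == 4 on MIPS32, the restored chain of constants below the frame pointer reproduces the fp[...] offsets quoted in the layout comment at the top of regexp-macro-assembler-mips.cc. Spelled out for quick cross-checking (illustrative values only; the names carry an Offset suffix to avoid clashing with the real constants):

    enum RestoredFrameOffsetsSketch {
      kInputEndOffset           = -4,   // fp[-4]   end of input
      kInputStartOffset         = -8,   // fp[-8]   start of input
      kStartIndexOffset         = -12,  // fp[-12]  start index
      kInputStringOffset        = -16,  // fp[-16]  input_string handle
      kInputStartMinusOneOffset = -20,  // fp[-20]  "position - 1"
      kAtStartOffset            = -24,  // fp[-24]  at-start flag
      kRegisterZeroOffset       = -28   // fp[-28]  first capture register
    };
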

Some files were not shown because too many files changed in this diff
