
Upgrade v8 to 3.11.7

v0.9.1-release
isaacs 13 years ago
parent commit cbdf3393a2
  1. 62 deps/v8/ChangeLog
  2. 1 deps/v8/Makefile
  3. 4 deps/v8/SConstruct
  4. 285 deps/v8/build/common.gypi
  5. 8 deps/v8/include/v8.h
  6. 1 deps/v8/src/SConscript
  7. 31 deps/v8/src/api.cc
  8. 4 deps/v8/src/api.h
  9. 9 deps/v8/src/arm/builtins-arm.cc
  10. 35 deps/v8/src/arm/code-stubs-arm.cc
  11. 4 deps/v8/src/arm/codegen-arm.cc
  12. 105 deps/v8/src/arm/full-codegen-arm.cc
  13. 14 deps/v8/src/arm/ic-arm.cc
  14. 5 deps/v8/src/arm/lithium-arm.cc
  15. 9 deps/v8/src/arm/lithium-arm.h
  16. 104 deps/v8/src/arm/lithium-codegen-arm.cc
  17. 86 deps/v8/src/arm/macro-assembler-arm.cc
  18. 9 deps/v8/src/arm/macro-assembler-arm.h
  19. 170 deps/v8/src/arm/regexp-macro-assembler-arm.cc
  20. 13 deps/v8/src/arm/regexp-macro-assembler-arm.h
  21. 12 deps/v8/src/arm/simulator-arm.h
  22. 50 deps/v8/src/arm/stub-cache-arm.cc
  23. 10 deps/v8/src/array.js
  24. 20 deps/v8/src/bootstrapper.cc
  25. 99 deps/v8/src/builtins.cc
  26. 32 deps/v8/src/code-stubs.cc
  27. 6 deps/v8/src/codegen.h
  28. 2 deps/v8/src/compiler.cc
  29. 20 deps/v8/src/contexts.h
  30. 22 deps/v8/src/d8.cc
  31. 1 deps/v8/src/d8.h
  32. 32 deps/v8/src/debug-agent.cc
  33. 134 deps/v8/src/elements-kind.cc
  34. 210 deps/v8/src/elements-kind.h
  35. 370 deps/v8/src/elements.cc
  36. 5 deps/v8/src/elements.h
  37. 5 deps/v8/src/factory.cc
  38. 17 deps/v8/src/factory.h
  39. 3 deps/v8/src/flag-definitions.h
  40. 3 deps/v8/src/full-codegen.cc
  41. 2 deps/v8/src/func-name-inferrer.h
  42. 20 deps/v8/src/heap-inl.h
  43. 47 deps/v8/src/heap.cc
  44. 2 deps/v8/src/heap.h
  45. 23 deps/v8/src/hydrogen-instructions.cc
  46. 182 deps/v8/src/hydrogen-instructions.h
  47. 213 deps/v8/src/hydrogen.cc
  48. 2 deps/v8/src/hydrogen.h
  49. 9 deps/v8/src/ia32/builtins-ia32.cc
  50. 38 deps/v8/src/ia32/code-stubs-ia32.cc
  51. 4 deps/v8/src/ia32/codegen-ia32.cc
  52. 2 deps/v8/src/ia32/debug-ia32.cc
  53. 109 deps/v8/src/ia32/full-codegen-ia32.cc
  54. 16 deps/v8/src/ia32/ic-ia32.cc
  55. 123 deps/v8/src/ia32/lithium-codegen-ia32.cc
  56. 3 deps/v8/src/ia32/lithium-codegen-ia32.h
  57. 8 deps/v8/src/ia32/lithium-ia32.cc
  58. 12 deps/v8/src/ia32/lithium-ia32.h
  59. 88 deps/v8/src/ia32/macro-assembler-ia32.cc
  60. 9 deps/v8/src/ia32/macro-assembler-ia32.h
  61. 156 deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
  62. 13 deps/v8/src/ia32/regexp-macro-assembler-ia32.h
  63. 8 deps/v8/src/ia32/simulator-ia32.h
  64. 27 deps/v8/src/ia32/stub-cache-ia32.cc
  65. 65 deps/v8/src/ic.cc
  66. 8 deps/v8/src/ic.h
  67. 24 deps/v8/src/incremental-marking-inl.h
  68. 41 deps/v8/src/incremental-marking.cc
  69. 15 deps/v8/src/incremental-marking.h
  70. 2 deps/v8/src/isolate.h
  71. 28 deps/v8/src/jsregexp.cc
  72. 26 deps/v8/src/jsregexp.h
  73. 7 deps/v8/src/lithium.cc
  74. 28 deps/v8/src/mark-compact-inl.h
  75. 158 deps/v8/src/mark-compact.cc
  76. 57 deps/v8/src/mark-compact.h
  77. 183 deps/v8/src/messages.js
  78. 9 deps/v8/src/mips/builtins-mips.cc
  79. 38 deps/v8/src/mips/code-stubs-mips.cc
  80. 4 deps/v8/src/mips/codegen-mips.cc
  81. 106 deps/v8/src/mips/full-codegen-mips.cc
  82. 23 deps/v8/src/mips/ic-mips.cc
  83. 114 deps/v8/src/mips/lithium-codegen-mips.cc
  84. 5 deps/v8/src/mips/lithium-mips.cc
  85. 10 deps/v8/src/mips/lithium-mips.h
  86. 87 deps/v8/src/mips/macro-assembler-mips.cc
  87. 9 deps/v8/src/mips/macro-assembler-mips.h
  88. 160 deps/v8/src/mips/regexp-macro-assembler-mips.cc
  89. 11 deps/v8/src/mips/regexp-macro-assembler-mips.h
  90. 10 deps/v8/src/mips/simulator-mips.h
  91. 51 deps/v8/src/mips/stub-cache-mips.cc
  92. 52 deps/v8/src/objects-debug.cc
  93. 247 deps/v8/src/objects-inl.h
  94. 5 deps/v8/src/objects-printer.cc
  95. 484 deps/v8/src/objects.cc
  96. 119 deps/v8/src/objects.h
  97. 20 deps/v8/src/parser.cc
  98. 33 deps/v8/src/platform-posix.cc
  99. 18 deps/v8/src/platform-win32.cc
  100. 3 deps/v8/src/platform.h

62 deps/v8/ChangeLog

@@ -1,3 +1,65 @@
2012-05-29: Version 3.11.7

        Get better function names in stack traces.
        Performance and stability improvements on all platforms.

2012-05-24: Version 3.11.6

        Fixed RegExp.prototype.toString for incompatible receivers
        (issue 1981).
        Performance and stability improvements on all platforms.

2012-05-23: Version 3.11.5

        Performance and stability improvements on all platforms.

2012-05-22: Version 3.11.4

        Some cleanup to common.gypi. This fixes some host/target combinations
        that weren't working in the Make build on Mac.
        Handle EINTR in socket functions and continue incomplete sends.
        (issue 2098)
        Fixed python deprecations. (issue 1391)
        Made socket send and receive more robust and return 0 on failure.
        (Chromium issue 15719)
        Fixed GCC 4.7 (C++11) compilation. (issue 2136)
        Set '-m32' option for host and target platforms
        Performance and stability improvements on all platforms.

2012-05-18: Version 3.11.3

        Disable optimization for functions that have scopes that cannot be
        reconstructed from the context chain. (issue 2071)
        Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
        Correctly check for native error objects. (Chromium issue 2138)
        Performance and stability improvements on all platforms.

2012-05-16: Version 3.11.2

        Revert r11496. (Chromium issue 128146)
        Implement map collection for incremental marking. (issue 1465)
        Add toString method to CallSite (which describes a frame of the
        stack trace).

2012-05-15: Version 3.11.1

        Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
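The 3.11.4 socket fixes (issue 2098, Chromium issue 15719) are the upstream changes behind the platform-posix.cc and platform-win32.cc hunks counted above. A minimal sketch of the pattern, assuming plain POSIX send(2) rather than V8's actual Socket class:

    // Retry when a signal interrupts the call, continue short writes, and
    // report failure as 0, matching the ChangeLog wording above.
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <cerrno>
    #include <cstddef>

    static size_t SendAll(int fd, const char* data, size_t len) {
      size_t written = 0;
      while (written < len) {
        ssize_t status = send(fd, data + written, len - written, 0);
        if (status < 0) {
          if (errno == EINTR) continue;  // interrupted: retry the same chunk
          return 0;                      // genuine error: 0 signals failure
        }
        written += static_cast<size_t>(status);  // short send: keep going
      }
      return written;
    }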

1 deps/v8/Makefile

@@ -228,6 +228,7 @@ $(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
must-set-ANDROID_NDK_ROOT
GYP_GENERATORS=make \
CC="${ANDROID_TOOL_PREFIX}-gcc" \
CXX="${ANDROID_TOOL_PREFIX}-g++" \
build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-S.android $(GYPFLAGS)

4 deps/v8/SConstruct

@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
'CPPDEFINES': ['V8_SHARED'],
'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
'library:shared': {
'CPPDEFINES': ['V8_SHARED']
'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
}
},
'os:freebsd': {

285 deps/v8/build/common.gypi

@@ -110,151 +110,117 @@
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['OS!="mac"', {
# TODO(mark): The OS!="mac" conditional is temporary. It can be
# removed once the Mac Chromium build stops setting target_arch to
# ia32 and instead sets it to mac. Other checks in this file for
# OS=="mac" can be removed at that time as well. This can be cleaned
# up once http://crbug.com/44205 is fixed.
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
],
'conditions': [
['v8_target_arch=="arm"', {
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'V8_TARGET_ARCH_ARM',
'CAN_USE_UNALIGNED_ACCESSES=1',
],
'conditions': [
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
],
}],
[ 'v8_can_use_unaligned_accesses=="false"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
[ 'v8_can_use_vfp_instructions=="true"', {
'defines': [
'CAN_USE_VFP_INSTRUCTIONS',
],
}],
[ 'v8_use_arm_eabi_hardfloat=="true"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
'CAN_USE_VFP_INSTRUCTIONS',
],
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-mfloat-abi=hard',],
}],
],
}, {
'defines': [
'USE_EABI_HARDFLOAT=0',
],
}],
# The ARM assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64" or OS=="android"', {
'target_conditions': [
['_toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}],
],
}],
}],
[ 'v8_can_use_unaligned_accesses=="false"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
['v8_target_arch=="ia32"', {
[ 'v8_can_use_vfp_instructions=="true"', {
'defines': [
'V8_TARGET_ARCH_IA32',
'CAN_USE_VFP_INSTRUCTIONS',
],
}],
['v8_target_arch=="mips"', {
[ 'v8_use_arm_eabi_hardfloat=="true"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
'USE_EABI_HARDFLOAT=1',
'CAN_USE_VFP_INSTRUCTIONS',
],
'variables': {
'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
'cflags': ['-mhard-float'],
'ldflags': ['-mhard-float'],
}, {
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}, {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
}],
],
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-mfloat-abi=hard',],
}],
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
'CAN_USE_FPU_INSTRUCTIONS',
],
}, {
'defines': [
'__mips_soft_float=1'
],
}],
['mips_arch_variant=="mips32r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
# The MIPS assembler assumes the host is 32 bits,
# so force building 32-bit host tools.
['host_arch=="x64"', {
'target_conditions': [
['_toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
],
}, {
'defines': [
'USE_EABI_HARDFLOAT=0',
],
}],
],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
['v8_target_arch=="mips"', {
'defines': [
'V8_TARGET_ARCH_MIPS',
],
'variables': {
'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
[ 'v8_use_mips_abi_hardfloat=="true"', {
'cflags': ['-mhard-float'],
'ldflags': ['-mhard-float'],
}, {
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}, {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
}],
],
}],
['v8_target_arch=="x64"', {
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'V8_TARGET_ARCH_X64',
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
],
}, { # Section for OS=="mac".
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {
'ARCHS': ['i386'],
}
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
'CAN_USE_FPU_INSTRUCTIONS',
],
}, {
'defines': [
'__mips_soft_float=1'
],
}],
['target_arch=="x64"', {
'xcode_settings': {
'ARCHS': ['x86_64'],
}
['mips_arch_variant=="mips32r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',],
}],
['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',],
}],
],
}],
}], # v8_target_arch=="mips"
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
],
'xcode_settings': {
'ARCHS': [ 'x86_64' ],
},
'msvs_settings': {
'VCLinkerTool': {
'StackReserveSize': '2097152',
},
},
}], # v8_target_arch=="x64"
['v8_use_liveobjectlist=="true"', {
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
@@ -272,6 +238,11 @@
'defines': [
'WIN32',
],
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
}],
['OS=="win" and v8_enable_prof==1', {
'msvs_settings': {
@@ -280,24 +251,9 @@
},
},
}],
['OS=="win" and v8_target_arch=="x64"', {
'msvs_settings': {
'VCLinkerTool': {
'StackReserveSize': '2097152',
},
},
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', {
'conditions': [
[ 'v8_target_arch!="x64"', {
# Pass -m32 to the compiler iff it understands the flag.
'variables': {
'm32flag': '<!((echo | $(echo ${CXX:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
}],
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing' ],
}],
@@ -306,6 +262,41 @@
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
}],
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="mips")', {
# Check whether the host compiler and target compiler support the
# '-m32' option and set it if so.
'target_conditions': [
['_toolset=="host"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
['_toolset=="target"', {
'variables': {
'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
'xcode_settings': {
'ARCHS': [ 'i386' ],
},
}],
],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
], # conditions
'configurations': {
'Debug': {
@@ -332,14 +323,8 @@
},
},
'conditions': [
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@@ -367,12 +352,6 @@
}],
],
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],
}],
['OS=="netbsd"', {
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '3', # -O3
@@ -385,10 +364,6 @@
},
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
'msvs_settings': {
'VCCLCompilerTool': {
'Optimization': '2',

8 deps/v8/include/v8.h

@@ -62,11 +62,13 @@
#else // _WIN32
// Setup for Linux shared library export. There is no need to distinguish
// between building or using the V8 shared library, but we should not
// export symbols when we are building a static library.
// Setup for Linux shared library export.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __attribute__ ((visibility("default")))
#else
#define V8EXPORT
#endif
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
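The point of the new BUILDING_V8_SHARED branch, per the 3.11.3 ChangeLog entry above ("Define V8_EXPORT to nothing for clients of v8"): default visibility is applied only while libv8 itself is being compiled, so embedders no longer re-export V8's symbols. A standalone mock of the idiom; MY_API stands in for V8EXPORT and the compile flags are illustrative, not Node's actual build settings:

    // Library build:  g++ -fvisibility=hidden -DV8_SHARED -DBUILDING_V8_SHARED ...
    // Client build:   g++ -fvisibility=hidden -DV8_SHARED ...
    #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
    # ifdef BUILDING_V8_SHARED
    #  define MY_API __attribute__ ((visibility("default")))  // export it
    # else
    #  define MY_API                                          // just import
    # endif
    #else
    # define MY_API
    #endif

    MY_API void PublicEntryPoint();  // lands in the .so's dynamic symbol table
    void InternalHelper();           // stays hidden under -fvisibility=hidden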

1 deps/v8/src/SConscript

@@ -68,6 +68,7 @@ SOURCES = {
diy-fp.cc
dtoa.cc
elements.cc
elements-kind.cc
execution.cc
factory.cc
flags.cc

31 deps/v8/src/api.cc

@@ -5040,7 +5040,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!self->HasFastElements()) {
if (!self->HasFastObjectElements()) {
return Local<Object>();
}
i::FixedArray* elms = i::FixedArray::cast(self->elements());
@@ -6045,13 +6045,6 @@ int HeapGraphNode::GetSelfSize() const {
}
int HeapGraphNode::GetRetainedSize() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->retained_size();
}
int HeapGraphNode::GetChildrenCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
@@ -6067,28 +6060,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
}
int HeapGraphNode::GetRetainersCount() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
return ToInternal(this)->retainers().length();
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->retainers()[index]);
}
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
}
v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");

4 deps/v8/src/api.h

@@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle<v8::internal::Object> obj)
v8::internal::Object* NeanderObject::get(int offset) {
ASSERT(value()->HasFastElements());
ASSERT(value()->HasFastObjectElements());
return v8::internal::FixedArray::cast(value()->elements())->get(offset);
}
void NeanderObject::set(int offset, v8::internal::Object* value) {
ASSERT(value_->HasFastElements());
ASSERT(value_->HasFastObjectElements());
v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
}

9 deps/v8/src/arm/builtins-arm.cc

@@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -208,7 +208,8 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@@ -440,10 +441,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ b(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// r3: JSArray
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r2,
r9,

35 deps/v8/src/arm/code-stubs-arm.cc

@@ -4824,27 +4824,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 8;
const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
// Argument 8 (sp[16]): Pass current isolate address.
// Argument 9 (sp[20]): Pass current isolate address.
__ mov(r0, Operand(ExternalReference::isolate_address()));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
__ str(r0, MemOperand(sp, 5 * kPointerSize));
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
// Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
__ str(r0, MemOperand(sp, 4 * kPointerSize));
// Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
// Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(r0, Operand(0));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@@ -4893,7 +4898,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ b(eq, &success);
Label failure;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
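Two changes meet in the comparison above: the generated regexp code now returns the number of match sets it wrote rather than a bare SUCCESS flag, and argument 6 was set to zero so that a global regexp runs out of output space after a single match. A small model of the contract as the stub relies on it (sentinel values are assumed from the surrounding code, not checked against the header):

    // r0 on return from the generated regexp code (assumed):
    //   > 0 : number of complete match sets stored in the output array
    //   == 0: NativeRegExpMacroAssembler::FAILURE (no match)
    //   < 0 : exception/retry style results
    inline bool ExecStubSucceeded(int result) {
      // The stub forces non-global behaviour, so success is exactly one match.
      return result == 1;
    }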
@@ -7095,8 +7102,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// ElementsTransitionGenerator::GenerateMapChangeElementsTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
{ REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
@@ -7359,9 +7366,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(r2, r5, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
// FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
__ JumpIfSmi(r0, &smi_element);
__ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
__ CheckFastSmiElements(r2, r5, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@@ -7373,7 +7380,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(r5, r4);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -7384,8 +7391,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Ret();
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ bind(&smi_element);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
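The label structure in this stub is a four-way dispatch on the stored value and the literal's elements kind. The same decision as a plain function (names are mine; the logic mirrors the branches above):

    enum StorePath {
      kStoreDouble,          // &double_elements: value stored as a double
      kStoreSmi,             // &smi_element: plain store, no write barrier
      kTransitionInRuntime,  // smi-only kind receiving an object
      kStoreObject           // &fast_elements: store plus RecordWrite
    };

    StorePath ChooseStorePath(bool kind_is_double,
                              bool value_is_smi,
                              bool kind_is_smi_only) {
      if (kind_is_double) return kStoreDouble;
      if (value_is_smi) return kStoreSmi;
      if (kind_is_smi_only) return kTransitionInRuntime;
      return kStoreObject;
    }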

4 deps/v8/src/arm/codegen-arm.cc

@@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value

105 deps/v8/src/arm/full-codegen-arm.cc

@@ -1701,7 +1701,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1722,8 +1722,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@@ -1751,7 +1750,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
if (constant_elements_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ ldr(r6, MemOperand(sp)); // Copy of array literal.
__ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
@@ -3466,104 +3465,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
Label done;
Label slow_case;
Register object = r0;
Register index1 = r1;
Register index2 = r2;
Register elements = r3;
Register scratch1 = r4;
Register scratch2 = r5;
__ ldr(object, MemOperand(sp, 2 * kPointerSize));
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE);
__ b(ne, &slow_case);
// Map is now in scratch1.
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
__ b(ne, &slow_case);
// Check the object's elements are in fast case and writable.
__ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset));
__ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch1, ip);
__ b(ne, &slow_case);
// Check that both indices are smis.
__ ldr(index1, MemOperand(sp, 1 * kPointerSize));
__ ldr(index2, MemOperand(sp, 0));
__ JumpIfNotBothSmi(index1, index2, &slow_case);
// Check that both indices are valid.
__ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
__ cmp(scratch1, index1);
__ cmp(scratch1, index2, hi);
__ b(ls, &slow_case);
// Bring the address of the elements into index1 and index2.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(index1,
scratch1,
Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(index2,
scratch1,
Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
// Swap elements.
__ ldr(scratch1, MemOperand(index1, 0));
__ ldr(scratch2, MemOperand(index2, 0));
__ str(scratch1, MemOperand(index2, 0));
__ str(scratch2, MemOperand(index1, 0));
Label no_remembered_set;
__ CheckPageFlag(elements,
scratch1,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.)
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index1,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index2,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&slow_case);
__ CallRuntime(Runtime::kSwapElements, 3);
__ bind(&done);
context()->Plug(r0);
}
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());

14 deps/v8/src/arm/ic-arm.cc

@@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
__ b(ne, &non_double_value);
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
r4,
&slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
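Both rewritten branches pick the cheapest elements kind that can hold the incoming value; only the smi-to-object case gets away with swapping the map alone. A hedged sketch of the decision (enum values are illustrative; the real ordering lives in the new elements-kind.h):

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    // Receiver currently has FAST_SMI_ELEMENTS; pick the transition target.
    ElementsKind TargetKindFor(bool value_is_smi, bool value_is_heap_number) {
      if (value_is_smi) return FAST_SMI_ELEMENTS;              // no transition
      if (value_is_heap_number) return FAST_DOUBLE_ELEMENTS;   // SmiToDouble path
      return FAST_ELEMENTS;  // map change only, the elements array is reused
    }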

5 deps/v8/src/arm/lithium-arm.cc

@@ -2082,8 +2082,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =

9 deps/v8/src/arm/lithium-arm.h

@@ -1236,6 +1236,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1252,13 +1253,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@@ -1272,6 +1273,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1740,6 +1742,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1762,6 +1765,7 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1806,6 +1810,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
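Every additional_index() accessor added here serves the same purpose: it lets the code generator fold a constant part of the key into the addressing arithmetic instead of emitting a separate add. The math, with ARM32 constants assumed for illustration:

    // FixedArray element offset (before the -kHeapObjectTag untagging that
    // FieldMemOperand applies): header + ((key + additional_index) << shift).
    // shift is kPointerSizeLog2 == 2 for tagged values and 3 for doubles.
    int KeyedElementOffset(int constant_key, int additional_index,
                           int header_size, int shift_size) {
      return header_size + ((constant_key + additional_index) << shift_size);
    }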

104 deps/v8/src/arm/lithium-codegen-arm.cc

@@ -2696,8 +2696,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ ubfx(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ cmp(scratch, Operand(FAST_ELEMENTS));
__ b(eq, &done);
__ cmp(scratch, Operand(GetInitialFastElementsKind()));
__ b(lt, &fail);
__ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ b(le, &done);
__ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ b(lt, &fail);
__ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
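DoLoadElements used to accept exactly FAST_ELEMENTS; with the holey kinds added it must accept a contiguous range of fast kinds plus the external-array range, which is what the compare-and-branch ladder above encodes. As one predicate (bounds passed in as parameters, since the real constants come from the new elements-kind.h):

    bool ElementsKindAcceptable(int kind,
                                int initial_fast, int terminal_fast,
                                int first_external, int last_external) {
      return (kind >= initial_fast && kind <= terminal_fast) ||
             (kind >= first_external && kind <= last_external);
    }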
@@ -2744,7 +2746,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
__ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ ldr(result, FieldMemOperand(scratch, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2776,18 +2780,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
Operand operand = key_is_constant
? Operand(constant_key * (1 << shift_size) +
? Operand(((constant_key + instr->additional_index()) << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(elements, elements, operand);
if (!key_is_constant) {
__ add(elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
}
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ vldr(result, elements, 0);
}
@@ -2809,26 +2816,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key * (1 << shift_size))
? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), 0);
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), 0);
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
? MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset)
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
@@ -2856,9 +2870,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3726,10 +3743,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
__ str(value, FieldMemOperand(elements, offset));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
if (instr->additional_index() != 0) {
__ add(scratch,
scratch,
Operand(instr->additional_index() << kPointerSizeLog2));
}
__ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@@ -3771,7 +3794,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
Operand operand = key_is_constant
? Operand(constant_key * (1 << shift_size) +
? Operand((constant_key << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
@@ -3789,7 +3812,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
vs);
}
__ vstr(value, scratch, 0);
__ vstr(value, scratch, instr->additional_index() << shift_size);
}
@@ -3810,25 +3833,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
Operand operand(key_is_constant ? Operand(constant_key << shift_size)
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), 0);
__ vstr(double_scratch0().low(), scratch0(), additional_offset);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), 0);
__ vstr(value, scratch0(), additional_offset);
}
} else {
Register value(ToRegister(instr->value()));
if (instr->additional_index() != 0 && !key_is_constant) {
__ add(scratch0(), key, Operand(instr->additional_index()));
}
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
? MemOperand(external_pointer,
((constant_key + instr->additional_index())
<< shift_size))
: (instr->additional_index() == 0
? MemOperand(external_pointer, key, LSL, shift_size)
: MemOperand(external_pointer, scratch0(), LSL, shift_size)));
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@@ -3847,7 +3878,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3884,20 +3918,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
__ mov(new_map_reg, Operand(to_map));
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(r2));
ASSERT(new_map_reg.is(r3));
@@ -4671,8 +4707,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -4823,10 +4860,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));

86 deps/v8/src/arm/macro-assembler-arm.cc

@@ -1868,10 +1868,12 @@ void MacroAssembler::CompareRoot(Register obj,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
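These STATIC_ASSERTs pin down the renumbering the whole upgrade revolves around: each packed fast kind is immediately followed by its holey variant, so one unsigned compare against a "maximum holey" bit-field value classifies a map. The asserted ordering, written out as an enum (the double kinds and the non-fast kinds follow; the full definition is in the new elements-kind.h, which this page does not show):

    enum ElementsKind {
      FAST_SMI_ELEMENTS       = 0,  // packed Smis (was FAST_SMI_ONLY_ELEMENTS)
      FAST_HOLEY_SMI_ELEMENTS = 1,  // Smis, may contain holes
      FAST_ELEMENTS           = 2,  // packed tagged values
      FAST_HOLEY_ELEMENTS     = 3   // tagged values, may contain holes
      // FAST_DOUBLE_ELEMENTS and FAST_HOLEY_DOUBLE_ELEMENTS come next,
      // then the dictionary, arguments and external-array kinds.
    };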
@@ -1879,22 +1881,25 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
void MacroAssembler::CheckFastSmiElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
@@ -1997,22 +2002,17 @@ void MacroAssembler::CompareMap(Register obj,
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
cmp(scratch, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind, NULL);
if (!current_map) break;
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(current_map)));
}
}
}
}
@@ -2865,28 +2865,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
cmp(map_in_out, ip);
ldr(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmp(map_in_out, scratch);
b(ne, no_map_match);
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
ldr(map_in_out, FieldMemOperand(scratch, offset));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
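Initial JSArray maps are no longer one context slot per kind: the native context now keeps a FixedArray of maps at JS_ARRAY_MAPS_INDEX, indexed by ElementsKind, which is what the offset arithmetic above walks. The offset computation on its own (ARM32 sizes assumed for illustration):

    // Byte offset of the cached map for a given kind inside that FixedArray.
    int ArrayMapSlotOffset(int elements_kind) {
      const int kPointerSize = 4;           // ARM32
      const int kFixedArrayHeaderSize = 8;  // map word + length word
      return elements_kind * kPointerSize + kFixedArrayHeaderSize;
    }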

9 deps/v8/src/arm/macro-assembler-arm.h

@@ -512,7 +512,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);
Register map_out,
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
@@ -802,9 +803,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in

170 deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,45 +43,49 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - r4 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
* - r5 : Pointer to current code object (Code*) including heap object tag.
* - r6 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - r7 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
* - r8 : points to tip of backtrack stack
* - r8 : Points to tip of backtrack stack
* - r9 : Unused, might be used by C code and expected unchanged.
* - r10 : End of input (points to byte after last character in input).
* - r11 : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - r12 : IP register, used by assembler. Very volatile.
* - r13/sp : points to tip of C stack.
* - r13/sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
* - fp[52] Isolate* isolate (Address of the current isolate)
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
* backtracking stack).
* - fp[56] Isolate* isolate (address of the current isolate)
* - fp[52] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[48] stack_area_base (high end of the memory area to use as
* backtracking stack).
* - fp[44] capture array size (may fit multiple sets of matches)
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
* - fp[32] return address (lr).
* - fp[28] old frame pointer (r11).
* - fp[32] return address (lr).
* - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-4] end of input (address of end of string).
* - fp[-8] start of input (address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] Offset of location before start of input (effectively character
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-24] At start (if 1, we are starting at the start of the
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-28] register 0 (Only positions must be stored in the first
* - fp[-32] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
@@ -197,9 +201,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, &not_at_start);
BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -212,9 +216,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
__ cmp(r0, Operand(0, RelocInfo::NONE));
BranchOrBacktrack(eq, on_not_at_start);
BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
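Both checks now derive "at start" on the fly from the start index and the current position; the old precomputed kAtStart slot is gone (its frame offset is repurposed, see the layout comment above). Equivalent logic in plain C++ (parameter names are mine):

    // current_offset is the negative byte offset from the end of the input,
    // exactly as the generated code keeps it in current_input_offset().
    bool AtStartOfInput(int start_index,
                        const unsigned char* input_start,
                        const unsigned char* end_of_input,
                        int current_offset) {
      if (start_index != 0) return false;  // the match did not begin at index 0
      return end_of_input + current_offset == input_start;
    }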
@@ -655,6 +659,7 @@ void RegExpMacroAssemblerARM::Fail() {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label return_r0;
// Finalize code - write the entry point code now we know how many
// registers we need.
@@ -678,8 +683,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(r0, Operand(0, RelocInfo::NONE));
__ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
Label stack_ok;
@@ -698,13 +704,13 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(r0, Operand(EXCEPTION));
__ jmp(&exit_label_);
__ jmp(&return_r0);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &exit_label_);
__ b(ne, &return_r0);
__ bind(&stack_ok);
@@ -725,41 +731,45 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ cmp(r1, Operand(0));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(r1, Operand(0, RelocInfo::NONE));
__ b(ne, &load_char_start_regexp);
__ mov(current_character(), Operand('\n'), LeaveCC, eq);
__ jmp(&start_regexp);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Address of register 0.
__ add(r1, frame_pointer(), Operand(kRegisterZero));
__ mov(r2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
__ sub(r2, r2, Operand(1), SetCC);
__ b(ne, &init_loop);
if (num_saved_registers_ > 8) {
// Address of register 0.
__ add(r1, frame_pointer(), Operand(kRegisterZero));
__ mov(r2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
__ sub(r2, r2, Operand(1), SetCC);
__ b(ne, &init_loop);
} else {
for (int i = 0; i < num_saved_registers_; i++) {
__ str(r0, register_location(i));
}
}
}
// Initialize backtrack stack pointer.
__ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register
__ mov(code_pointer(), Operand(masm_->CodeObject()));
// Load previous char as initial value of current character register.
Label at_start;
__ ldr(r0, MemOperand(frame_pointer(), kAtStart));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ mov(current_character(), Operand('\n'));
__ jmp(&start_label_);
__ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@@ -786,6 +796,10 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ ldr(r2, register_location(i));
__ ldr(r3, register_location(i + 1));
if (global()) {
// Keep capture start in r4 for the zero-length check later.
__ mov(r4, r2);
}
if (mode_ == UC16) {
__ add(r2, r1, Operand(r2, ASR, 1));
__ add(r3, r1, Operand(r3, ASR, 1));
@@ -797,10 +811,54 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ str(r3, MemOperand(r0, kPointerSize, PostIndex));
}
}
__ mov(r0, Operand(SUCCESS));
if (global()) {
// Restart matching if the regular expression is flagged as global.
__ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
__ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
__ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ add(r0, r0, Operand(1));
__ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ sub(r1, r1, Operand(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmp(r1, Operand(num_saved_registers_));
__ b(lt, &return_r0);
__ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ add(r2, r2, Operand(num_saved_registers_ * kPointerSize));
__ str(r2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r0 to initialize registers with its value in the next run.
__ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Special case for zero-length matches.
// r4: capture start index
__ cmp(current_input_offset(), r4);
// Not a zero-length match, restart.
__ b(ne, &load_char_start_regexp);
// Offset from the end is zero if we already reached the end.
__ cmp(current_input_offset(), Operand(0));
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
__ add(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
__ b(&load_char_start_regexp);
} else {
__ mov(r0, Operand(SUCCESS));
}
}
// Exit and return r0
__ bind(&exit_label_);
if (global()) {
__ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_r0);
// Skip sp past regexp registers and local variables.
__ mov(sp, frame_pointer());
// Restore registers r4..r11 and return (restoring lr to pc).
@@ -822,7 +880,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(r0, Operand(0, RelocInfo::NONE));
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &exit_label_);
__ b(ne, &return_r0);
// String might have moved: Reload end of string from frame.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@@ -859,7 +917,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(r0, Operand(EXCEPTION));
__ jmp(&exit_label_);
__ jmp(&return_r0);
}
CodeDesc code_desc;
@@ -1014,8 +1072,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
}
void RegExpMacroAssemblerARM::Succeed() {
bool RegExpMacroAssemblerARM::Succeed() {
__ jmp(&success_label_);
return global();
}
@@ -1307,8 +1366,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
__ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
offset = r0;
// r4 is not being used to store the capture start index at this point.
__ add(r4, current_input_offset(), Operand(cp_offset * char_size()));
offset = r4;
}
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.
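The global-regexp changes above turn GetCode into a loop: after each successful match the stub stores the captures, decrements the remaining output-register budget, and jumps back to load_char_start_regexp, advancing the position by one character (two bytes in UC16 mode) when the previous match was empty, so a global match can never spin on a zero-length match. A minimal standalone C++ sketch of that driver logic, with an illustrative callback standing in for the generated code (none of these names are V8 API):

#include <functional>
#include <string>
#include <utility>
#include <vector>

// Stand-in for one run of the generated code: returns true on a match at or
// after |pos| and reports the match boundaries through |start| and |end|.
typedef std::function<bool(const std::string&, int pos, int* start, int* end)>
    Matcher;

// Drives a global match, mirroring the zero-length-match handling that the
// assembler implements with the r4/current_input_offset() comparison.
std::vector<std::pair<int, int> > MatchGlobal(const Matcher& match,
                                              const std::string& subject) {
  std::vector<std::pair<int, int> > matches;
  int pos = 0;
  while (pos <= static_cast<int>(subject.size())) {
    int start, end;
    if (!match(subject, pos, &start, &end)) break;
    matches.push_back(std::make_pair(start, end));
    // A zero-length match must not restart at the same offset; step one
    // character ahead (one byte in ASCII mode, two in UC16) instead.
    pos = (end == start) ? end + 1 : end;
  }
  return matches;
}

A zero-length match at the very end of the subject ends the loop because pos steps past subject.size(), which is the case the current_input_offset()-against-zero comparison and early exit above handle.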

13
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -113,7 +113,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@@ -137,7 +137,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@@ -149,10 +150,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kAtStart - kPointerSize;
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
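Each frame-layout constant above is defined relative to its predecessor, so dropping the kAtStart local and threading kSuccessfulCaptures and kNumOutputRegisters through only touches the adjacent definitions; everything further down the chain re-derives. A compilable sketch of the same chained-offset idiom (the base offsets are illustrative; only the naming mirrors the header):

#include <cstddef>

static const int kPointerSize = static_cast<int>(sizeof(void*));

// Above the frame pointer: parameters pushed by the caller.
static const int kReturnAddress = 0;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;  // New.
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;

// Below the frame pointer: locals pushed in GetCode.
static const int kInputString = -kPointerSize;
static const int kSuccessfulCaptures = kInputString - kPointerSize;  // New local; kAtStart is gone.
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;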

12
deps/v8/src/arm/simulator-arm.h

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -49,16 +49,16 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int, Isolate*);
void*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7))
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -401,9 +401,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \

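The matcher type gains one int (the count of output registers still available to fill), which is why both CALL_GENERATED_REGEXP_CODE variants grow a p8 argument and the simulator call count goes from 9 to 10. A hedged sketch of calling through such a widened function-pointer type; the parameter order mirrors arm_regexp_matcher, but the entry point here is an ordinary C++ function rather than generated code:

typedef unsigned char byte;

typedef int (*regexp_matcher)(const void* subject, int start_index,
                              const byte* input_start, const byte* input_end,
                              void* return_address_dummy, int* output,
                              int output_size,  // The newly threaded argument.
                              void* stack_base, int direct_call,
                              void* isolate);

static int FakeMatcher(const void*, int, const byte*, const byte*, void*,
                       int* output, int output_size, void*, int, void*) {
  for (int i = 0; i < output_size; i++) output[i] = -1;
  return 0;  // FAILURE in NativeRegExpMacroAssembler's result enum.
}

int CallThroughMatcherType() {
  int registers[4];
  regexp_matcher entry = &FakeMatcher;
  return entry(0, 0, 0, 0, 0, registers, 4, 0, 0, 0);
}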
50
deps/v8/src/arm/stub-cache-arm.cc

@@ -1581,16 +1581,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
__ CheckFastSmiElements(r3, r7, &call_builtin);
// edx: receiver
// r3: map
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
r3,
r7,
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
r3,
r7,
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -3372,8 +3385,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3497,8 +3513,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3838,8 +3857,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -3902,8 +3924,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4042,8 +4067,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@@ -4225,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@@ -4253,7 +4281,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
DONT_DO_SMI_CHECK);
__ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4263,7 +4291,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value_reg, MemOperand(scratch));
} else {
ASSERT(elements_kind == FAST_ELEMENTS);
ASSERT(IsFastObjectElementsKind(elements_kind));
__ add(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));

10
deps/v8/src/array.js

@@ -827,7 +827,8 @@ function ArraySort(comparefn) {
var element = a[i];
var order = %_CallFunction(receiver, element, pivot, comparefn);
if (order < 0) {
%_SwapElements(a, i, low_end);
a[i] = a[low_end];
a[low_end] = element;
low_end++;
} else if (order > 0) {
do {
@@ -836,9 +837,12 @@ function ArraySort(comparefn) {
var top_elem = a[high_start];
order = %_CallFunction(receiver, top_elem, pivot, comparefn);
} while (order > 0);
%_SwapElements(a, i, high_start);
a[i] = a[high_start];
a[high_start] = element;
if (order < 0) {
%_SwapElements(a, i, low_end);
element = a[i];
a[i] = a[low_end];
a[low_end] = element;
low_end++;
}
}
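Both sort hunks replace the %_SwapElements intrinsic with an explicit three-assignment swap through a temporary, using only ordinary indexed loads and stores. Rendered as standalone C++ over a plain vector (the comparator, the element type, and the omission of the original's boundary checks are simplifications, not V8's code), the partition step reads:

#include <vector>

// One partition step of the sort's inner loop, with the swap intrinsic
// spelled out as three assignments.
void PartitionStep(std::vector<int>& a, int i, int& low_end, int& high_start,
                   int pivot) {
  int element = a[i];
  int order = element - pivot;  // Stand-in for %_CallFunction(comparefn).
  if (order < 0) {
    a[i] = a[low_end];  // These three lines replace SwapElements(a, i, low_end).
    a[low_end] = element;
    low_end++;
  } else if (order > 0) {
    do {
      high_start--;  // Scan down past elements greater than the pivot.
      order = a[high_start] - pivot;
    } while (order > 0);
    a[i] = a[high_start];  // Swap the out-of-place pair...
    a[high_start] = element;
    if (order < 0) {
      element = a[i];  // ...and rotate through low_end when needed.
      a[i] = a[low_end];
      a[low_end] = element;
      low_end++;
    }
  }
}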

20
deps/v8/src/bootstrapper.cc

@@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
object_function_map->
set_instance_descriptors(heap->empty_descriptor_array());
object_function_map->set_instance_descriptors(
heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
@@ -516,12 +516,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
function_instance_map_writable_prototype_->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
function_without_prototype_map);
empty_fm->set_instance_descriptors(
function_without_prototype_map->instance_descriptors());
empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm);
Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
empty_function_map->set_prototype(
global_context()->object_function()->prototype());
empty_function->set_map(*empty_function_map);
return empty_function;
}
@@ -1094,7 +1092,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
ASSERT(result->HasFastElements());
ASSERT(result->HasFastObjectElements());
#endif
}
@@ -1187,7 +1185,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Check the state of the object.
ASSERT(result->HasFastProperties());
ASSERT(result->HasFastElements());
ASSERT(result->HasFastObjectElements());
#endif
}
@@ -1637,7 +1635,7 @@ bool Genesis::InstallNatives() {
array_function->initial_map()->CopyDropTransitions();
Map* new_map;
if (!maybe_map->To<Map>(&new_map)) return false;
new_map->set_elements_kind(FAST_ELEMENTS);
new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
array_function->set_initial_map(new_map);
// Make "length" magic on instances.

99
deps/v8/src/builtins.cc

@@ -200,9 +200,12 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array->set_elements(heap->empty_fixed_array());
if (!FLAG_smi_only_arrays) {
Context* global_context = isolate->context()->global_context();
if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
!global_context->object_js_array_map()->IsUndefined()) {
array->set_map(Map::cast(global_context->object_js_array_map()));
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
!global_context->js_array_maps()->IsUndefined()) {
FixedArray* map_array =
FixedArray::cast(global_context->js_array_maps());
array->set_map(Map::cast(map_array->
get(TERMINAL_FAST_ELEMENTS_KIND)));
}
}
} else {
@@ -222,6 +225,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
{ MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
}
ElementsKind elements_kind = array->GetElementsKind();
if (!IsFastHoleyElementsKind(elements_kind)) {
elements_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe_array =
array->TransitionElementsKind(elements_kind);
if (maybe_array->IsFailure()) return maybe_array;
}
// We do not use SetContent to skip the unnecessary elements type check.
array->set_elements(FixedArray::cast(fixed_array));
array->set_length(Smi::cast(obj));
@@ -250,7 +260,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Allocate an appropriately typed elements array.
MaybeObject* maybe_elms;
ElementsKind elements_kind = array->GetElementsKind();
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
if (IsFastDoubleElementsKind(elements_kind)) {
maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
number_of_elements);
} else {
@@ -261,13 +271,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
// Fill in the content
switch (array->GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS: {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS: {
FixedArray* smi_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
}
break;
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -277,6 +289,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
}
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
@@ -412,19 +425,17 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
HeapObject* elms = array->elements();
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (array->HasFastElements()) return elms;
if (args == NULL) {
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
array->TransitionElementsKind(FAST_ELEMENTS);
if (maybe_transition->IsFailure()) return maybe_transition;
}
if (args == NULL || array->HasFastObjectElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
array->TransitionElementsKind(FAST_ELEMENTS);
if (maybe_transition->IsFailure()) return maybe_transition;
return elms;
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastElements() ||
if (args == NULL || array->HasFastObjectElements() ||
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
@@ -518,8 +529,8 @@ BUILTIN(ArrayPush) {
}
FixedArray* new_elms = FixedArray::cast(obj);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@@ -590,7 +601,7 @@ BUILTIN(ArrayShift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
@@ -632,7 +643,7 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -654,8 +665,8 @@ BUILTIN(ArrayUnshift) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@@ -684,7 +695,7 @@ BUILTIN(ArraySlice) {
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastTypeElements() ||
if (!array->HasFastSmiOrObjectElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -700,7 +711,7 @@ BUILTIN(ArraySlice) {
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastTypeElements();
&& JSObject::cast(receiver)->HasFastSmiOrObjectElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -765,9 +776,9 @@ BUILTIN(ArraySlice) {
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
CopyObjectToObjectElements(elms, elements_kind, k,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, result_len);
elements_kind, 0, result_len);
return result_array;
}
@@ -788,7 +799,7 @@ BUILTIN(ArraySplice) {
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastTypeElements());
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
@@ -839,9 +850,9 @@ BUILTIN(ArraySplice) {
{
// Fill newly created array.
CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
CopyObjectToObjectElements(elms, elements_kind, actual_start,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, actual_delete_count);
elements_kind, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@@ -890,12 +901,13 @@ BUILTIN(ArraySplice) {
{
// Copy the part before actual_start as is.
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0,
new_elms, kind, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(elms, FAST_ELEMENTS,
CopyObjectToObjectElements(elms, kind,
actual_start + actual_delete_count,
new_elms, FAST_ELEMENTS,
new_elms, kind,
actual_start + item_count, to_copy);
}
@@ -942,11 +954,12 @@ BUILTIN(ArrayConcat) {
// and calculating total length.
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
ElementsKind elements_kind = GetInitialFastElementsKind();
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
if (!arg->IsJSArray() ||
!JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
@@ -963,8 +976,18 @@ BUILTIN(ArrayConcat) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
elements_kind = FAST_ELEMENTS;
if (!JSArray::cast(arg)->HasFastSmiElements()) {
if (IsFastSmiElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
elements_kind = FAST_ELEMENTS;
}
}
}
if (JSArray::cast(arg)->HasFastHoleyElements()) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
}
@@ -984,8 +1007,8 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
CopyObjectToObjectElements(elms, elements_kind, 0,
result_elms, elements_kind,
start_pos, len);
start_pos += len;
}
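ArrayConcat now folds the elements kinds of all arguments into the most general fast kind: an argument holding non-smi values upgrades a smi kind to the matching object kind, and a holey argument makes the result holey. A self-contained sketch of that join (the enum and helpers are re-declared so the snippet compiles on its own; in V8 they live in elements-kind.h):

enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS
};

static bool IsSmiKind(ElementsKind k) {
  return k == FAST_SMI_ELEMENTS || k == FAST_HOLEY_SMI_ELEMENTS;
}
static bool IsHoleyKind(ElementsKind k) {
  return k == FAST_HOLEY_SMI_ELEMENTS || k == FAST_HOLEY_ELEMENTS;
}
static ElementsKind ToHoley(ElementsKind k) {
  if (k == FAST_SMI_ELEMENTS) return FAST_HOLEY_SMI_ELEMENTS;
  if (k == FAST_ELEMENTS) return FAST_HOLEY_ELEMENTS;
  return k;
}

// Joins the running result kind with one argument's kind, following the two
// checks in BUILTIN(ArrayConcat) above.
ElementsKind MergeKind(ElementsKind result, ElementsKind arg) {
  if (!IsSmiKind(arg) && IsSmiKind(result)) {
    result = IsHoleyKind(result) ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
  }
  if (IsHoleyKind(arg)) result = ToHoley(result);
  return result;
}

Merging FAST_HOLEY_SMI_ELEMENTS with a packed FAST_ELEMENTS argument, for instance, yields FAST_HOLEY_ELEMENTS: the smi restriction and the packed invariant are both lost.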

32
deps/v8/src/code-stubs.cc

@@ -262,10 +262,13 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
break;
case EXTERNAL_BYTE_ELEMENTS:
@@ -292,7 +295,9 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS: {
case FAST_HOLEY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS: {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_js_array_,
elements_kind_,
@@ -300,6 +305,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
}
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_js_array_,
grow_mode_);
@@ -430,24 +436,32 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (to_ == FAST_ELEMENTS) {
if (from_ == FAST_SMI_ONLY_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
} else if (from_ == FAST_DOUBLE_ELEMENTS) {
if (IsFastSmiOrObjectElementsKind(to_)) {
if (IsFastSmiOrObjectElementsKind(from_)) {
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(!IsFastSmiElementsKind(to_));
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
} else {
UNREACHABLE();
}
KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
is_jsarray_,
FAST_ELEMENTS,
to_,
grow_mode_);
} else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
} else if (IsFastSmiElementsKind(from_) &&
IsFastDoubleElementsKind(to_)) {
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
grow_mode_);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm);
} else {
UNREACHABLE();
}
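The transition stub's dispatch is now phrased over kind predicates instead of exact kind equality: smi-or-object targets need only a map change (or a double-to-object conversion), smi-to-double has a dedicated generator, and packed-double to holey-double is again just a map change. The decision tree, reduced to a compilable sketch with puts() standing in for the code generators:

#include <cassert>
#include <cstdio>

enum Kind { SMI, HOLEY_SMI, OBJECT, HOLEY_OBJECT, DOUBLE, HOLEY_DOUBLE };

static bool IsSmi(Kind k) { return k == SMI || k == HOLEY_SMI; }
static bool IsSmiOrObject(Kind k) { return k <= HOLEY_OBJECT; }
static bool IsDouble(Kind k) { return k == DOUBLE || k == HOLEY_DOUBLE; }

void GenerateTransition(Kind from, Kind to) {
  if (IsSmiOrObject(to)) {
    if (IsSmiOrObject(from)) {
      std::puts("map change");          // GenerateMapChangeElementsTransition.
    } else {
      assert(IsDouble(from) && !IsSmi(to));
      std::puts("double -> object");    // GenerateDoubleToObject.
    }
  } else if (IsSmi(from) && IsDouble(to)) {
    std::puts("smi -> double");         // GenerateSmiToDouble.
  } else {
    assert(IsDouble(from) && to == HOLEY_DOUBLE);
    std::puts("map change");            // Packed double -> holey double.
  }
}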

6
deps/v8/src/codegen.h

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
static void GenerateSmiOnlyToObject(MacroAssembler* masm);
static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
private:

2
deps/v8/src/compiler.cc

@@ -118,7 +118,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
FLAG_crankshaft &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
function()->scope()->allows_lazy_recompilation() &&
function()->scope()->AllowsLazyRecompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}

20
deps/v8/src/contexts.h

@@ -106,9 +106,7 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -248,9 +246,7 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
SMI_JS_ARRAY_MAP_INDEX,
DOUBLE_JS_ARRAY_MAP_INDEX,
OBJECT_JS_ARRAY_MAP_INDEX,
JS_ARRAY_MAPS_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@@ -373,18 +369,6 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();
static int GetContextMapIndexFromElementsKind(
ElementsKind elements_kind) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
} else if (elements_kind == FAST_ELEMENTS) {
return Context::OBJECT_JS_ARRAY_MAP_INDEX;
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
return Context::SMI_JS_ARRAY_MAP_INDEX;
}
}
#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \

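The three per-kind array-map slots collapse into one JS_ARRAY_MAPS_INDEX slot holding an array indexed by elements kind, so the removed GetContextMapIndexFromElementsKind helper becomes a plain indexed load; the builtins.cc hunk above reads it with get(TERMINAL_FAST_ELEMENTS_KIND). A minimal sketch of the replacement shape (plain C++ stand-ins rather than V8's FixedArray):

enum ElementsKind {
  FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
  kFastElementsKindCount
};

struct Map;  // Opaque stand-in for v8::internal::Map.

struct GlobalContext {
  // One array stored in the JS_ARRAY_MAPS_INDEX slot replaces the old
  // SMI_/DOUBLE_/OBJECT_JS_ARRAY_MAP_INDEX trio.
  Map* js_array_maps[kFastElementsKindCount];
  Map* ArrayMapFor(ElementsKind kind) const { return js_array_maps[kind]; }
};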
22
deps/v8/src/d8.cc

@@ -26,7 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows.
// Defined when linking against shared lib on Windows.
#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
#define V8_SHARED
#endif
@@ -833,8 +834,6 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("readbinary"),
FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readbuffer"),
FunctionTemplate::New(ReadBuffer));
global_template->Set(String::New("readline"),
@@ -1055,23 +1054,6 @@ static char* ReadChars(const char* name, int* size_out) {
}
Handle<Value> Shell::ReadBinary(const Arguments& args) {
String::Utf8Value filename(args[0]);
int size;
if (*filename == NULL) {
return ThrowException(String::New("Error loading file"));
}
char* chars = ReadChars(*filename, &size);
if (chars == NULL) {
return ThrowException(String::New("Error reading file"));
}
// We skip checking the string for UTF8 characters and use it raw as
// backing store for the external string with 8-bit characters.
BinaryResource* resource = new BinaryResource(chars, size);
return String::NewExternal(resource);
}
Handle<Value> Shell::ReadBuffer(const Arguments& args) {
String::Utf8Value filename(args[0]);
int length;

1
deps/v8/src/d8.h

@@ -307,7 +307,6 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBinary(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {

32
deps/v8/src/debug-agent.cc

@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -247,7 +247,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
while (!(c == '\n' && prev_c == '\r')) {
prev_c = c;
received = conn->Receive(&c, 1);
if (received <= 0) {
if (received == 0) {
PrintF("Error %d\n", Socket::LastError());
return SmartArrayPointer<char>();
}
@@ -323,41 +323,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn,
const char* embedding_host) {
static const int kBufferSize = 80;
char buffer[kBufferSize]; // Sending buffer.
bool ok;
int len;
int r;
// Send the header.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Type: connect\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"V8-Version: %s\r\n", v8::V8::GetVersion());
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Protocol-Version: 1\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
if (embedding_host != NULL) {
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"Embedding-Host: %s\r\n", embedding_host);
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
}
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize),
"%s: 0\r\n", kContentLength);
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
// Terminate header with empty line.
len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "\r\n");
r = conn->Send(buffer, len);
if (r != len) return false;
ok = conn->Send(buffer, len);
if (!ok) return false;
// No body for connect message.
@@ -454,7 +454,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
int total_received = 0;
while (total_received < len) {
int received = conn->Receive(data + total_received, len - total_received);
if (received <= 0) {
if (received == 0) {
return total_received;
}
total_received += received;

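SendConnectMessage emits an RFC-822-style header block (Type, V8-Version, Protocol-Version, optional Embedding-Host, and a zero Content-Length) terminated by an empty line, with Send now checked as a bool rather than compared against the byte count. A standalone sketch that assembles the same block into one buffer so it could be sent with a single checked call (an illustrative helper, not the agent's API):

#include <cstdio>
#include <string>

std::string BuildConnectMessage(const char* v8_version,
                                const char* embedding_host) {
  char line[80];
  std::string msg = "Type: connect\r\n";
  std::snprintf(line, sizeof(line), "V8-Version: %s\r\n", v8_version);
  msg += line;
  msg += "Protocol-Version: 1\r\n";
  if (embedding_host != 0) {
    std::snprintf(line, sizeof(line), "Embedding-Host: %s\r\n", embedding_host);
    msg += line;
  }
  msg += "Content-Length: 0\r\n";  // kContentLength; connect has no body.
  msg += "\r\n";                   // Empty line terminates the header block.
  return msg;
}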
134
deps/v8/src/elements-kind.cc

@@ -0,0 +1,134 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "elements-kind.h"
#include "api.h"
#include "elements.h"
#include "objects.h"
namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}
ElementsKind GetInitialFastElementsKind() {
if (FLAG_packed_arrays) {
return FAST_SMI_ELEMENTS;
} else {
return FAST_HOLEY_SMI_ELEMENTS;
}
}
struct InitializeFastElementsKindSequence {
static void Construct(
ElementsKind** fast_elements_kind_sequence_ptr) {
ElementsKind* fast_elements_kind_sequence =
new ElementsKind[kFastElementsKindCount];
*fast_elements_kind_sequence_ptr = fast_elements_kind_sequence;
STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND);
fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS;
fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS;
fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS;
fast_elements_kind_sequence[4] = FAST_ELEMENTS;
fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS;
}
};
static LazyInstance<ElementsKind*,
InitializeFastElementsKindSequence>::type
fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER;
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
ASSERT(sequence_number >= 0 &&
sequence_number < kFastElementsKindCount);
return fast_elements_kind_sequence.Get()[sequence_number];
}
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
for (int i = 0; i < kFastElementsKindCount; ++i) {
if (fast_elements_kind_sequence.Get()[i] == elements_kind) {
return i;
}
}
UNREACHABLE();
return 0;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed) {
ASSERT(IsFastElementsKind(elements_kind));
ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
while (true) {
int index =
GetSequenceIndexFromFastElementsKind(elements_kind) + 1;
elements_kind = GetFastElementsKindFromSequenceIndex(index);
if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
return elements_kind;
}
}
UNREACHABLE();
return TERMINAL_FAST_ELEMENTS_KIND;
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
switch (from_kind) {
case FAST_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS;
case FAST_HOLEY_SMI_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS;
case FAST_DOUBLE_ELEMENTS:
return to_kind != FAST_SMI_ELEMENTS &&
to_kind != FAST_HOLEY_SMI_ELEMENTS &&
to_kind != FAST_DOUBLE_ELEMENTS;
case FAST_HOLEY_DOUBLE_ELEMENTS:
return to_kind == FAST_ELEMENTS ||
to_kind == FAST_HOLEY_ELEMENTS;
case FAST_ELEMENTS:
return to_kind == FAST_HOLEY_ELEMENTS;
case FAST_HOLEY_ELEMENTS:
return false;
default:
return false;
}
}
} } // namespace v8::internal
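GetNextMoreGeneralFastElementsKind walks the fixed generality sequence SMI -> HOLEY_SMI -> DOUBLE -> HOLEY_DOUBLE -> OBJECT -> HOLEY_OBJECT, skipping holey kinds when the caller only allows packed ones. The same walk over a plain array, as a compilable sketch:

#include <cassert>

enum Kind { SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT };

static const Kind kSequence[] = {
  SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT
};
static const int kCount = 6;

static bool IsHoley(Kind k) {
  return k == HOLEY_SMI || k == HOLEY_DOUBLE || k == HOLEY_OBJECT;
}

static int IndexOf(Kind k) {
  for (int i = 0; i < kCount; i++) {
    if (kSequence[i] == k) return i;
  }
  assert(false);
  return 0;
}

Kind NextMoreGeneral(Kind kind, bool allow_only_packed) {
  assert(kind != HOLEY_OBJECT);  // The terminal kind has no successor.
  while (true) {
    kind = kSequence[IndexOf(kind) + 1];
    if (!IsHoley(kind) || !allow_only_packed) return kind;
  }
}

NextMoreGeneral(SMI, true) skips HOLEY_SMI and returns DOUBLE, which is exactly what the loop above does with allow_only_packed set.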

210
deps/v8/src/elements-kind.h

@@ -0,0 +1,210 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ELEMENTS_KIND_H_
#define V8_ELEMENTS_KIND_H_
#include "v8checks.h"
namespace v8 {
namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
// efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
// together at once.
FAST_ELEMENTS,
FAST_HOLEY_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
FAST_DOUBLE_ELEMENTS,
FAST_HOLEY_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
EXTERNAL_PIXEL_ELEMENTS,
// Derived constants from ElementsKind
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS,
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index);
int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
inline bool IsFastElementsKind(ElementsKind kind) {
ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastDoubleElementsKind(ElementsKind kind) {
return kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastSmiElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS;
}
inline bool IsFastObjectElementsKind(ElementsKind kind) {
return kind == FAST_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastHoleyElementsKind(ElementsKind kind) {
return kind == FAST_HOLEY_SMI_ELEMENTS ||
kind == FAST_HOLEY_DOUBLE_ELEMENTS ||
kind == FAST_HOLEY_ELEMENTS;
}
inline bool IsHoleyElementsKind(ElementsKind kind) {
return IsFastHoleyElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS;
}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_DOUBLE_ELEMENTS ||
kind == FAST_ELEMENTS;
}
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) {
return FAST_SMI_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
return FAST_DOUBLE_ELEMENTS;
}
if (holey_kind == FAST_HOLEY_ELEMENTS) {
return FAST_ELEMENTS;
}
return holey_kind;
}
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
if (packed_kind == FAST_SMI_ELEMENTS) {
return FAST_HOLEY_SMI_ELEMENTS;
}
if (packed_kind == FAST_DOUBLE_ELEMENTS) {
return FAST_HOLEY_DOUBLE_ELEMENTS;
}
if (packed_kind == FAST_ELEMENTS) {
return FAST_HOLEY_ELEMENTS;
}
return packed_kind;
}
inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
ASSERT(IsFastSmiElementsKind(from_kind));
return (from_kind == FAST_SMI_ELEMENTS)
? FAST_ELEMENTS
: FAST_HOLEY_ELEMENTS;
}
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
(IsFastSmiElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind));
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
}
ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
bool allow_only_packed);
inline bool CanTransitionToMoreGeneralFastElementsKind(
ElementsKind elements_kind,
bool allow_only_packed) {
return IsFastElementsKind(elements_kind) &&
(elements_kind != TERMINAL_FAST_ELEMENTS_KIND &&
(!allow_only_packed || elements_kind != FAST_ELEMENTS));
}
} } // namespace v8::internal
#endif // V8_ELEMENTS_KIND_H_
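IsSimpleMapChangeTransition above captures the two transitions that can reuse the backing store unchanged: going packed to holey within the same representation, and going smi to object (both store tagged values). A worked sketch with the relevant predicates inlined:

enum Kind { SMI, HOLEY_SMI, OBJECT, HOLEY_OBJECT, DOUBLE, HOLEY_DOUBLE };

static Kind ToHoley(Kind k) {
  if (k == SMI) return HOLEY_SMI;
  if (k == OBJECT) return HOLEY_OBJECT;
  if (k == DOUBLE) return HOLEY_DOUBLE;
  return k;
}
static bool IsSmi(Kind k) { return k == SMI || k == HOLEY_SMI; }
static bool IsObject(Kind k) { return k == OBJECT || k == HOLEY_OBJECT; }

// True when only the map needs to change: smi -> object keeps the tagged
// layout, packed -> holey keeps every stored value valid.
bool IsSimpleMapChange(Kind from, Kind to) {
  return ToHoley(from) == to || (IsSmi(from) && IsObject(to));
}

IsSimpleMapChange(SMI, HOLEY_SMI) and IsSimpleMapChange(HOLEY_SMI, HOLEY_OBJECT) both hold, while SMI -> DOUBLE does not: doubles are stored unboxed, so the backing store must be rewritten.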

370
deps/v8/src/elements.cc

@@ -39,8 +39,14 @@
// Inheritance hierarchy:
// - ElementsAccessorBase (abstract)
// - FastElementsAccessor (abstract)
// - FastObjectElementsAccessor
// - FastSmiOrObjectElementsAccessor
// - FastPackedSmiElementsAccessor
// - FastHoleySmiElementsAccessor
// - FastPackedObjectElementsAccessor
// - FastHoleyObjectElementsAccessor
// - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
// - ExternalElementsAccessor (abstract)
// - ExternalByteElementsAccessor
// - ExternalUnsignedByteElementsAccessor
@@ -65,9 +71,15 @@ namespace internal {
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \
V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \
FixedArray) \
V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \
V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \
FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
@@ -139,8 +151,6 @@ void CopyObjectToObjectElements(FixedArray* from,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -148,7 +158,7 @@ void CopyObjectToObjectElements(FixedArray* from,
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -160,12 +170,15 @@ void CopyObjectToObjectElements(FixedArray* from,
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
@@ -190,7 +203,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// Fast object arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -200,7 +213,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
#endif
}
ASSERT(to != from);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
@@ -216,7 +229,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
to->set_the_hole(i + to_start);
}
}
if (to_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(to_kind)) {
if (!heap->InNewSpace(to)) {
heap->RecordWrites(to->address(),
to->OffsetOfElementAt(to_start),
@@ -234,7 +247,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -242,7 +255,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to->length(); ++i) {
@@ -255,14 +268,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from->get(i + from_start);
Object* value;
ASSERT(to_kind == FAST_ELEMENTS);
// Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
ASSERT(IsFastObjectElementsKind(to_kind));
// Because Double -> Object elements transitions allocate HeapObjects
// iteratively, the allocate must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they
@@ -404,6 +417,38 @@ class ElementsAccessorBase : public ElementsAccessor {
virtual ElementsKind kind() const { return ElementsTraits::Kind; }
static void ValidateContents(JSObject* holder, int length) {
}
static void ValidateImpl(JSObject* holder) {
FixedArrayBase* fixed_array_base = holder->elements();
// When objects are first allocated, their elements are Failures.
if (fixed_array_base->IsFailure()) return;
if (!fixed_array_base->IsHeapObject()) return;
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
if (map == heap->raw_unchecked_one_pointer_filler_map() ||
map == heap->raw_unchecked_two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
int length = 0;
if (holder->IsJSArray()) {
Object* length_obj = JSArray::cast(holder)->length();
if (length_obj->IsSmi()) {
length = Smi::cast(length_obj)->value();
}
} else {
length = fixed_array_base->length();
}
ElementsAccessorSubclass::ValidateContents(holder, length);
}
virtual void Validate(JSObject* holder) {
ElementsAccessorSubclass::ValidateImpl(holder);
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
@@ -455,9 +500,10 @@ class ElementsAccessorBase : public ElementsAccessor {
Object* length,
BackingStore* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
int length) {
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
int capacity,
int length) {
return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
array,
capacity,
@@ -623,6 +669,7 @@ class FastElementsAccessor
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
friend class NonStrictArgumentsElementsAccessor;
typedef typename KindTraits::BackingStore BackingStore;
@@ -633,10 +680,21 @@ class FastElementsAccessor
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
bool same_size = old_length->IsSmi() &&
static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
ElementsKind kind = array->GetElementsKind();
if (!same_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
// Check whether the backing store should be shrunk.
if (length <= old_capacity) {
if (array->HasFastTypeElements()) {
if (array->HasFastSmiOrObjectElements()) {
MaybeObject* maybe_obj = array->EnsureWritableFastElements();
if (!maybe_obj->To(&backing_store)) return maybe_obj;
}
@@ -668,39 +726,40 @@ class FastElementsAccessor
MaybeObject* result = FastElementsAccessorSubclass::
SetFastElementsCapacityAndLength(array, new_capacity, length);
if (result->IsFailure()) return result;
array->ValidateElements();
return length_object;
}
// Request conversion to slow elements.
return array->GetHeap()->undefined_value();
}
};
class FastObjectElementsAccessor
: public FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize> {
public:
explicit FastObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
ASSERT(obj->HasFastElements() ||
obj->HasFastSmiOnlyElements() ||
uint32_t key,
JSReceiver::DeleteMode mode) {
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap();
FixedArray* backing_store = FixedArray::cast(obj->elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
backing_store = FixedArray::cast(backing_store->get(1));
backing_store =
KindTraits::BackingStore::cast(
FixedArray::cast(backing_store)->get(1));
} else {
Object* writable;
MaybeObject* maybe = obj->EnsureWritableFastElements();
if (!maybe->ToObject(&writable)) return maybe;
backing_store = FixedArray::cast(writable);
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
MaybeObject* transitioned =
obj->TransitionElementsKind(GetHoleyElementsKind(kind));
if (transitioned->IsFailure()) return transitioned;
}
if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
Object* writable;
MaybeObject* maybe = obj->EnsureWritableFastElements();
if (!maybe->ToObject(&writable)) return maybe;
backing_store = KindTraits::BackingStore::cast(writable);
}
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
@@ -712,15 +771,14 @@ class FastObjectElementsAccessor
// has too few used values, normalize it.
// To avoid doing the check on every delete we require at least
// one adjacent hole to the value being deleted.
Object* hole = heap->the_hole_value();
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() >= kMinLengthForSparsenessCheck &&
!heap->InNewSpace(backing_store) &&
((key > 0 && backing_store->get(key - 1) == hole) ||
(key + 1 < length && backing_store->get(key + 1) == hole))) {
((key > 0 && backing_store->is_the_hole(key - 1)) ||
(key + 1 < length && backing_store->is_the_hole(key + 1)))) {
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
if (backing_store->get(i) != hole) ++num_used;
if (!backing_store->is_the_hole(i)) ++num_used;
// Bail out early if more than 1/4 is used.
if (4 * num_used > backing_store->length()) break;
}
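The delete path only considers falling back to dictionary elements when the backing store is long, lives in old space, and the deleted slot borders an existing hole; it then counts used slots, bailing out as soon as more than a quarter are in use. The same heuristic over a vector of optionals, as a sketch (the old-space test has no analogue here):

#include <optional>
#include <vector>

// Mirrors DeleteCommon's sparseness check: worth normalizing only when at
// most a quarter of a long backing store is still in use.
bool ShouldNormalize(const std::vector<std::optional<int> >& store, int key) {
  const int kMinLengthForSparsenessCheck = 64;
  int length = static_cast<int>(store.size());
  if (length < kMinLengthForSparsenessCheck) return false;
  // Require a run of at least two holes around the deleted slot, so the
  // full scan below does not run on every single delete.
  bool adjacent_hole = (key > 0 && !store[key - 1].has_value()) ||
                       (key + 1 < length && !store[key + 1].has_value());
  if (!adjacent_hole) return false;
  int num_used = 0;
  for (int i = 0; i < length; ++i) {
    if (store[i].has_value()) ++num_used;
    if (4 * num_used > length) return false;  // More than 1/4 used: stay fast.
  }
  return true;
}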
@@ -733,27 +791,75 @@ class FastObjectElementsAccessor
return heap->true_value();
}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
return DeleteCommon(obj, key, mode);
}
static bool HasElementImpl(
Object* receiver,
JSObject* holder,
uint32_t key,
typename KindTraits::BackingStore* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
return !backing_store->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
#if DEBUG
FixedArrayBase* elements = holder->elements();
Heap* heap = elements->GetHeap();
Map* map = elements->map();
ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
(map == heap->fixed_array_map() ||
map == heap->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(KindTraits::Kind) ==
((map == heap->fixed_array_map() && length == 0) ||
map == heap->fixed_double_array_map())));
for (int i = 0; i < length; i++) {
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(elements);
ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
static_cast<Object*>(backing_store->get(i))->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
backing_store->is_the_hole(i)));
}
#endif
}
};
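Aside: what ValidateContents enforces, restated as a self-contained check with hypothetical types (an illustrative sketch, not V8 code): a packed kind must contain no holes, and a smi kind must hold only smis in its non-hole slots.

#include <cassert>
#include <vector>

enum class Kind { kPackedSmi, kHoleySmi, kPackedObject, kHoleyObject };
struct Slot { bool is_hole; bool is_smi; };

void ValidateContentsSketch(Kind kind, const std::vector<Slot>& elements) {
  bool holey = kind == Kind::kHoleySmi || kind == Kind::kHoleyObject;
  bool smi_kind = kind == Kind::kPackedSmi || kind == Kind::kHoleySmi;
  for (const Slot& slot : elements) {
    if (!holey) assert(!slot.is_hole);  // packed kinds admit no holes
    if (smi_kind && !slot.is_hole) assert(slot.is_smi);
  }
}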
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize> {
public:
explicit FastSmiOrObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kPointerSize>(name) {}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
if (IsFastSmiOrObjectElementsKind(to_kind)) {
CopyObjectToObjectElements(
FixedArray::cast(from), KindTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
} else if (IsFastDoubleElementsKind(to_kind)) {
CopyObjectToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
} else {
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
}
@ -762,51 +868,85 @@ class FastObjectElementsAccessor
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
JSObject::SetFastElementsCapacitySmiMode set_capacity_mode =
obj->HasFastSmiElements()
? JSObject::kAllowSmiElements
: JSObject::kDontAllowSmiElements;
return obj->SetFastElementsCapacityAndLength(capacity,
length,
set_capacity_mode);
}
};
class FastPackedSmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> > {
public:
explicit FastPackedSmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
ElementsKindTraits<FAST_SMI_ELEMENTS> >(name) {}
};
class FastHoleySmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> > {
public:
explicit FastHoleySmiElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
ElementsKindTraits<FAST_HOLEY_SMI_ELEMENTS> >(name) {}
};
class FastPackedObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> > {
public:
explicit FastPackedObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS> >(name) {}
};
class FastHoleyObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> > {
public:
explicit FastHoleyObjectElementsAccessor(const char* name)
: FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_HOLEY_ELEMENTS> >(name) {}
};
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastDoubleElementsAccessor
: public FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kDoubleSize> {
public:
explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits,
kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
return obj->SetFastDoubleElementsCapacityAndLength(capacity,
length);
}
protected:
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
@ -814,12 +954,15 @@ class FastDoubleElementsAccessor
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
@ -829,26 +972,35 @@ class FastDoubleElementsAccessor
}
return to->GetHeap()->undefined_value();
}
};
class FastPackedDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
explicit FastPackedDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
};
class FastHoleyDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
public:
friend class ElementsAccessorBase<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
explicit FastHoleyDoubleElementsAccessor(const char* name)
: FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
};
@ -1115,13 +1267,16 @@ class DictionaryElementsAccessor
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
@ -1248,7 +1403,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
if (arguments->IsDictionary()) {
return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
} else {
// It's difficult to access the version of DeleteCommon that is declared
// in the templatized superclass, so call the concrete implementation in
// the class for the most generalized ElementsKind subclass.
return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
}
}
return obj->GetHeap()->true_value();
@ -1312,7 +1470,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
if (array->IsDictionary()) {
return elements_accessors_[DICTIONARY_ELEMENTS];
} else {
return elements_accessors_[FAST_HOLEY_ELEMENTS];
}
case EXTERNAL_BYTE_ARRAY_TYPE:
return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];

5
deps/v8/src/elements.h

@ -28,6 +28,7 @@
#ifndef V8_ELEMENTS_H_
#define V8_ELEMENTS_H_
#include "elements-kind.h"
#include "objects.h"
#include "heap.h"
#include "isolate.h"
@ -45,6 +46,10 @@ class ElementsAccessor {
virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(JSObject* obj) = 0;
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with

5
deps/v8/src/factory.cc

@ -775,7 +775,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
instance_size != JSObject::kHeaderSize) {
Handle<Map> initial_map = NewMap(type,
instance_size,
GetInitialFastElementsKind());
function->set_initial_map(*initial_map);
initial_map->set_constructor(*function);
}
@ -1013,10 +1013,11 @@ void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
void Factory::EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->EnsureCanContainElements(*elements, length, mode));
}

17
deps/v8/src/factory.h

@ -216,9 +216,10 @@ class Factory {
Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
Handle<Object> value);
Handle<Map> NewMap(
InstanceType type,
int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@ -269,13 +270,14 @@ class Factory {
Handle<JSModule> NewJSModule();
// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(
int capacity,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED);
void SetElementsCapacityAndLength(Handle<JSArray> array,
@ -287,6 +289,7 @@ class Factory {
void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
void EnsureCanContainElements(Handle<JSArray> array,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);

3
deps/v8/src/flag-definitions.h

@ -150,6 +150,7 @@ DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony_modules, harmony_scoping)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(clever_optimizations,
true,
@ -197,6 +198,8 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, false,
"perform array index dehoisting")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")

3
deps/v8/src/full-codegen.cc

@ -314,7 +314,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
code->set_optimizable(info->IsOptimizable() &&
!info->function()->flags()->Contains(kDontOptimize) &&
info->function()->scope()->AllowsLazyRecompilation());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateTypeFeedbackCells(code);

2
deps/v8/src/func-name-inferrer.h

@ -88,6 +88,8 @@ class FuncNameInferrer : public ZoneObject {
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
if (entries_stack_.is_empty())
funcs_to_infer_.Clear();
}
private:

20
deps/v8/src/heap-inl.h

@ -595,12 +595,24 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
void ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
}
#endif
}
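Aside: the new per-entry check asserts that external ASCII strings really contain 7-bit data. A minimal sketch of that invariant (illustrative, not the V8 helper):

#include <cstddef>

// True when every character is in the 7-bit range 0..127, the property the
// Verify() loop above asserts for each external ASCII string.
static bool AllAscii(const char* chars, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    if (static_cast<unsigned char>(chars[i]) > 0x7F) return false;
  }
  return true;
}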

47
deps/v8/src/heap.cc

@ -2469,7 +2469,7 @@ bool Heap::CreateApiObjects() {
// bottleneck to trap the Smi-only -> fast elements transition, and there
// appears to be no benefit in optimizing this case.
Map* new_neander_map = Map::cast(obj);
new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
set_neander_map(new_neander_map);
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
@ -3050,6 +3050,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type,
}
JSMessageObject* message = JSMessageObject::cast(result);
message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
message->initialize_elements();
message->set_type(type);
message->set_arguments(arguments);
@ -3326,6 +3327,8 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
return Failure::OutOfMemoryException();
}
ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
Map* map = external_ascii_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@ -3751,7 +3754,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Check the state of the object
ASSERT(JSObject::cast(result)->HasFastProperties());
ASSERT(JSObject::cast(result)->HasFastObjectElements());
return result;
}
@ -3796,7 +3799,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_inobject_properties(in_object_properties);
map->set_unused_property_fields(in_object_properties);
map->set_prototype(prototype);
ASSERT(map->has_fast_object_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
@ -3913,8 +3916,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
return obj;
}
@ -3959,6 +3961,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@ -3979,8 +3984,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
}
} else {
ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedArray(capacity);
} else {
@ -4006,6 +4010,7 @@ MaybeObject* Heap::AllocateJSArrayWithElements(
array->set_elements(elements);
array->set_length(Smi::FromInt(elements->length()));
array->ValidateElements();
return array;
}
@ -4490,6 +4495,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
#ifdef DEBUG
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
char* dest = SeqAsciiString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif // DEBUG
return result;
}
@ -4536,13 +4551,13 @@ MaybeObject* Heap::AllocateJSArray(
Context* global_context = isolate()->context()->global_context();
JSFunction* array_function = global_context->array_function();
Map* map = array_function->initial_map();
Object* maybe_map_array = global_context->js_array_maps();
if (!maybe_map_array->IsUndefined()) {
Object* maybe_transitioned_map =
FixedArray::cast(maybe_map_array)->get(elements_kind);
if (!maybe_transitioned_map->IsUndefined()) {
map = Map::cast(maybe_transitioned_map);
}
}
return AllocateJSObjectFromMap(map, pretenure);
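Aside: the lookup above replaces the former fixed trio of per-context array-map slots with a single js_array_maps fixed array indexed by ElementsKind. A sketch of the same cache shape, with hypothetical names:

#include <array>
#include <cstddef>

struct MapLike {};  // stand-in for v8::internal::Map
constexpr std::size_t kFastElementsKindCount = 6;  // assumed kind count

// Returns the cached transitioned initial map for the requested kind, or
// the array function's initial map when no transitioned map is recorded.
MapLike* LookupInitialArrayMap(
    const std::array<MapLike*, kFastElementsKindCount>& js_array_maps,
    std::size_t elements_kind, MapLike* initial_map) {
  MapLike* cached = js_array_maps[elements_kind];
  return cached != nullptr ? cached : initial_map;
}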
@ -4827,9 +4842,7 @@ MaybeObject* Heap::AllocateGlobalContext() {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(global_context_map());
context->set_js_array_maps(undefined_value());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;

2
deps/v8/src/heap.h

@ -621,7 +621,7 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateMap(
InstanceType instance_type,
int instance_size,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
// Allocates a partial map for bootstrapping.
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,

23
deps/v8/src/hydrogen-instructions.cc

@ -1685,6 +1685,9 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
if (hole_check_mode_ == PERFORM_HOLE_CHECK) {
stream->Add(" check_hole");
}
}
@ -1736,7 +1739,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
OMIT_HOLE_CHECK);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
object(), index);
map_check->InsertBefore(this);
@ -1784,8 +1787,11 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo(
stream->Add("pixel");
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -1882,9 +1888,12 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -1899,7 +1908,13 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo(
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
ElementsKind from_kind = original_map()->elements_kind();
ElementsKind to_kind = transitioned_map()->elements_kind();
stream->Add(" %p [%s] -> %p [%s]",
*original_map(),
ElementsAccessor::ForKind(from_kind)->name(),
*transitioned_map(),
ElementsAccessor::ForKind(to_kind)->name());
}

182
deps/v8/src/hydrogen-instructions.h

@ -2083,28 +2083,21 @@ class HCheckMaps: public HTemplateInstruction<2> {
HCheckMaps* check_map = new HCheckMaps(object, map);
SmallMapList* map_set = check_map->map_set();
// Since transitioned elements maps of the initial map don't fail the map
// check, the CheckMaps instruction doesn't need to depend on ElementsKinds.
check_map->ClearGVNFlag(kDependsOnElementsKind);
ElementsKind kind = map->elements_kind();
bool packed = IsFastPackedElementsKind(kind);
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
Map* transitioned_map =
map->LookupElementsTransitionMap(kind, NULL);
if (transitioned_map) {
map_set->Add(Handle<Map>(transitioned_map));
}
};
map_set->Sort();
return check_map;
}
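Aside: the loop above walks the fast elements kinds from the checked map's kind toward ever more general kinds, adding each recorded transitioned map to the check's map set, since those maps would still pass the check. A self-contained sketch of the walk, assuming kinds are integers ordered by generality (hypothetical types, not V8 API):

#include <array>
#include <vector>

struct KindMap { int kind; };  // stand-in for a transitioned Map

// transitions[k] holds the map recorded for kind k, or nullptr if none.
std::vector<KindMap*> CollectTransitionedMaps(
    const std::array<KindMap*, 6>& transitions, int start_kind) {
  std::vector<KindMap*> maps;
  for (int kind = start_kind + 1; kind < 6; ++kind) {
    if (transitions[kind] != nullptr) maps.push_back(transitions[kind]);
  }
  return maps;
}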
@ -3946,15 +3939,28 @@ class HLoadFunctionPrototype: public HUnaryOperation {
virtual bool DataEquals(HValue* other) { return true; }
};
class ArrayInstructionInterface {
public:
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
virtual void SetIndexOffset(uint32_t index_offset) = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { };
};
enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
class HLoadKeyedFastElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedFastElement(HValue* obj,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: hole_check_mode_(hole_check_mode),
index_offset_(0),
is_dehoisted_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
set_representation(Representation::Tagged());
@ -3964,6 +3970,12 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@ -3982,26 +3994,43 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastElement()) return false;
HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
return false;
return hole_check_mode_ == other_load->hole_check_mode_;
}
private:
HoleCheckMode hole_check_mode_;
uint32_t index_offset_;
bool is_dehoisted_;
};
class HLoadKeyedFastDoubleElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedFastDoubleElement(
HValue* elements,
HValue* key,
HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
: index_offset_(0),
is_dehoisted_(false),
hole_check_mode_(hole_check_mode) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
set_representation(Representation::Double());
SetGVNFlag(kDependsOnDoubleArrayElements);
SetFlag(kUseGVN);
}
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Representation RequiredInputRepresentation(int index) {
// The key is supposed to be Integer32.
@ -4010,21 +4039,38 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
: Representation::Integer32();
}
bool RequiresHoleCheck() {
return hole_check_mode_ == PERFORM_HOLE_CHECK;
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
protected:
virtual bool DataEquals(HValue* other) {
if (!other->IsLoadKeyedFastDoubleElement()) return false;
HLoadKeyedFastDoubleElement* other_load =
HLoadKeyedFastDoubleElement::cast(other);
return hole_check_mode_ == other_load->hole_check_mode_;
}
private:
uint32_t index_offset_;
bool is_dehoisted_;
HoleCheckMode hole_check_mode_;
};
class HLoadKeyedSpecializedArrayElement
: public HTemplateInstruction<2>, public ArrayInstructionInterface {
public:
HLoadKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
ElementsKind elements_kind)
: elements_kind_(elements_kind),
index_offset_(0),
is_dehoisted_(false) {
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@ -4052,6 +4098,12 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
HValue* external_pointer() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
virtual Range* InferRange(Zone* zone);
@ -4067,6 +4119,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
@ -4188,11 +4242,12 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
class HStoreKeyedFastElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind = FAST_ELEMENTS)
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
@ -4210,8 +4265,14 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
bool value_is_smi() {
return IsFastSmiElementsKind(elements_kind_);
}
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
if (value_is_smi()) {
@ -4227,14 +4288,18 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
class HStoreKeyedFastDoubleElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HStoreKeyedFastDoubleElement(HValue* elements,
HValue* key,
HValue* val)
: index_offset_(0), is_dehoisted_(false) {
SetOperandAt(0, elements);
SetOperandAt(1, key);
SetOperandAt(2, val);
@ -4254,6 +4319,12 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
@ -4264,16 +4335,21 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
private:
uint32_t index_offset_;
bool is_dehoisted_;
};
class HStoreKeyedSpecializedArrayElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
HStoreKeyedSpecializedArrayElement(HValue* external_elements,
HValue* key,
HValue* val,
ElementsKind elements_kind)
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
SetGVNFlag(kChangesSpecializedArrayElements);
SetOperandAt(0, external_elements);
SetOperandAt(1, key);
@ -4301,11 +4377,19 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
private:
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
};
@ -4352,9 +4436,19 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
transitioned_map_(transitioned_map) {
SetOperandAt(0, object);
SetFlag(kUseGVN);
// Don't set GVN DependOn flags here. That would defeat GVN's detection of
// congruent HTransitionElementsKind instructions. Instruction hoisting
// handles HTransitionElementsKind instruction specially, explicitly adding
// DependsOn flags during its dependency calculations.
SetGVNFlag(kChangesElementsKind);
if (original_map->has_fast_double_elements()) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
if (transitioned_map->has_fast_double_elements()) {
SetGVNFlag(kChangesElementsPointer);
SetGVNFlag(kChangesNewSpacePromotion);
}
set_representation(Representation::Tagged());
}
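Aside: the conditional flags above encode one rule: only transitions that involve double elements reallocate the backing store (every element must be boxed or unboxed), while a smi-to-object transition is a map swap that leaves the store in place. A hedged restatement (hypothetical helper, not V8 API):

// Transitions that touch a double representation copy into a fresh backing
// store, which is what justifies kChangesElementsPointer above.
static bool TransitionReallocatesElements(bool from_double, bool to_double) {
  return from_double || to_double;
}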
@ -4592,7 +4686,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
HValue* context() { return OperandAt(0); }
ElementsKind boilerplate_elements_kind() const {
if (!boilerplate_object_->IsJSObject()) {
return TERMINAL_FAST_ELEMENTS_KIND;
}
return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
}

213
deps/v8/src/hydrogen.cc

@ -1709,23 +1709,23 @@ void HGlobalValueNumberer::ProcessLoopBlock(
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
if (instr->IsTransitionElementsKind()) {
// It's possible to hoist transitions out of a loop as long as the
// hoisting wouldn't move the transition past an instruction that has a
// DependsOn flag for anything it changes.
GVNFlagSet hoist_depends_blockers =
HValue::ConvertChangesToDependsFlags(instr->ChangesFlags());
// In addition, the transition must not be hoisted above elements kind
// changes, or if the transition is destructive to the elements buffer,
// changes to array pointer or array contents.
GVNFlagSet hoist_change_blockers;
hoist_change_blockers.Add(kChangesElementsKind);
HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
if (trans->original_map()->has_fast_double_elements()) {
hoist_change_blockers.Add(kChangesElementsPointer);
hoist_change_blockers.Add(kChangesDoubleArrayElements);
}
if (trans->transitioned_map()->has_fast_double_elements()) {
hoist_change_blockers.Add(kChangesElementsPointer);
hoist_change_blockers.Add(kChangesArrayElements);
}
if (FLAG_trace_gvn) {
@ -2758,6 +2758,7 @@ HGraph* HGraphBuilder::CreateGraph() {
sce.Process();
graph()->EliminateRedundantBoundsChecks();
graph()->DehoistSimpleArrayIndexComputations();
return graph();
}
@ -3016,7 +3017,6 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
HBoundsCheck* check = HBoundsCheck::cast(i);
check->ReplaceAllUsesWith(check->index());
if (!FLAG_array_bounds_checks_elimination) continue;
int32_t offset;
@ -3035,10 +3035,8 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
*data_p = bb_data_list;
} else if (data->OffsetIsCovered(offset)) {
check->DeleteAndReplaceWith(NULL);
} else if (data->BasicBlock() == bb) {
data->CoverCheck(check, offset);
} else {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
@ -3082,6 +3080,93 @@ void HGraph::EliminateRedundantBoundsChecks() {
}
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
HValue* index = array_operation->GetKey();
HConstant* constant;
HValue* subexpression;
int32_t sign;
if (index->IsAdd()) {
sign = 1;
HAdd* add = HAdd::cast(index);
if (add->left()->IsConstant()) {
subexpression = add->right();
constant = HConstant::cast(add->left());
} else if (add->right()->IsConstant()) {
subexpression = add->left();
constant = HConstant::cast(add->right());
} else {
return;
}
} else if (index->IsSub()) {
sign = -1;
HSub* sub = HSub::cast(index);
if (sub->left()->IsConstant()) {
subexpression = sub->right();
constant = HConstant::cast(sub->left());
} else if (sub->right()->IsConstant()) {
subexpression = sub->left();
constant = HConstant::cast(sub->right());
} else {
return;
}
} else {
return;
}
if (!constant->HasInteger32Value()) return;
int32_t value = constant->Integer32Value() * sign;
// We limit offset values to 30 bits because we want to avoid the risk of
// overflows when the offset is added to the object header size.
if (value >= 1 << 30 || value < 0) return;
array_operation->SetKey(subexpression);
if (index->HasNoUses()) {
index->DeleteAndReplaceWith(NULL);
}
ASSERT(value >= 0);
array_operation->SetIndexOffset(static_cast<uint32_t>(value));
array_operation->SetDehoisted(true);
}
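Aside: a worked example of the rewrite. For a keyed access whose key is the expression i + 4, the matcher binds subexpression = i, constant = 4, sign = +1, so value = 4; since 0 <= 4 < 2^30, the access is rewritten to key i with index_offset 4, and the now-dead add can be deleted. A key of i - 4 produces value = -4 and is rejected, as is any offset of 2^30 or more. The guard alone, restated (illustrative):

#include <cstdint>

static bool OffsetIsDehoistable(int32_t constant, int32_t sign,
                                uint32_t* offset_out) {
  int64_t value = static_cast<int64_t>(constant) * sign;
  if (value < 0 || value >= (int64_t{1} << 30)) return false;  // same guard
  *offset_out = static_cast<uint32_t>(value);
  return true;
}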
void HGraph::DehoistSimpleArrayIndexComputations() {
if (!FLAG_array_index_dehoisting) return;
HPhase phase("H_Dehoist index computations", this);
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstruction* instr = blocks()->at(i)->first();
instr != NULL;
instr = instr->next()) {
ArrayInstructionInterface* array_instruction = NULL;
if (instr->IsLoadKeyedFastElement()) {
HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsLoadKeyedFastDoubleElement()) {
HLoadKeyedFastDoubleElement* op =
HLoadKeyedFastDoubleElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsLoadKeyedSpecializedArrayElement()) {
HLoadKeyedSpecializedArrayElement* op =
HLoadKeyedSpecializedArrayElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsStoreKeyedFastElement()) {
HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsStoreKeyedFastDoubleElement()) {
HStoreKeyedFastDoubleElement* op =
HStoreKeyedFastDoubleElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else if (instr->IsStoreKeyedSpecializedArrayElement()) {
HStoreKeyedSpecializedArrayElement* op =
HStoreKeyedSpecializedArrayElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else {
continue;
}
DehoistArrayIndex(array_instruction);
}
}
}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
@ -3881,7 +3966,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
environment()->ExpressionStackAt(0), // Iteration index.
OMIT_HOLE_CHECK));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@ -4172,7 +4257,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
} else if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@ -4379,11 +4464,13 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Representation::Integer32()));
switch (boilerplate_elements_kind) {
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(value));
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastElement(
elements,
key,
@ -4391,6 +4478,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind));
break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
key,
value));
@ -5148,9 +5236,12 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
break;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -5175,13 +5266,16 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
ASSERT(val != NULL);
switch (elements_kind) {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
return new(zone()) HStoreKeyedFastDoubleElement(
elements, checked_key, val);
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
// Smi-only arrays need a smi check.
AddInstruction(new(zone()) HCheckSmi(val));
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
default:
@ -5190,10 +5284,13 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
}
}
// It's an element load (!is_store).
HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ?
OMIT_HOLE_CHECK :
PERFORM_HOLE_CHECK;
if (IsFastDoubleElementsKind(elements_kind)) {
return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode);
} else { // Smi or Object elements.
return new(zone()) HLoadKeyedFastElement(elements, checked_key, mode);
}
}
@ -5201,15 +5298,30 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store) {
HInstruction* mapcheck =
AddInstruction(new(zone()) HCheckMaps(object, map, dependency));
// No GVNFlag is necessary for ElementsKind if there is an explicit dependency
// on a HElementsTransition instruction. The flag can also be removed if the
// map to check has FAST_HOLEY_ELEMENTS, since there can be no further
// ElementsKind transitions. Finally, the dependency can be removed for stores
// for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
// generated store code.
if (dependency ||
(map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
(map->elements_kind() == FAST_ELEMENTS && is_store)) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
bool fast_smi_only_elements = map->has_fast_smi_elements();
bool fast_elements = map->has_fast_object_elements();
HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
if (is_store && (fast_elements || fast_smi_only_elements)) {
HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map());
check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
AddInstruction(check_cow_map);
}
HInstruction* length = NULL;
HInstruction* checked_key = NULL;
@ -5262,8 +5374,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ElementsKind elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind) &&
elements_kind != GetInitialFastElementsKind()) {
possible_transitioned_maps.Add(map);
}
}
@ -5277,12 +5389,17 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
int num_untransitionable_maps = 0;
Handle<Map> untransitionable_map;
HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
ASSERT(map->IsMap());
if (!transition_target.at(i).is_null()) {
ASSERT(Map::IsValidElementsTransition(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
transition = new(zone()) HTransitionElementsKind(
object, map, transition_target.at(i));
AddInstruction(transition);
} else {
type_todo[map->elements_kind()] = true;
if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
@ -5302,7 +5419,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
: BuildLoadKeyedGeneric(object, key));
} else {
instr = AddInstruction(BuildMonomorphicElementAccess(
object, key, val, transition, untransitionable_map, is_store));
}
*has_side_effects |= instr->HasObservableSideEffects();
instr->set_position(position);
@ -5319,20 +5436,18 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HLoadExternalArrayPointer* external_elements = NULL;
HInstruction* checked_key = NULL;
// Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds
// are handled before external arrays.
STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
elements_kind <= LAST_ELEMENTS_KIND;
elements_kind = ElementsKind(elements_kind + 1)) {
// After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some
// code that's executed for all external array cases.
STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
LAST_ELEMENTS_KIND);
if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@ -5354,10 +5469,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_true);
HInstruction* access;
if (IsFastElementsKind(elements_kind)) {
if (is_store && !IsFastDoubleElementsKind(elements_kind)) {
AddInstruction(new(zone()) HCheckMaps(
elements, isolate()->factory()->fixed_array_map(),
elements_kind_branch));
@ -5444,7 +5557,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
: BuildLoadKeyedGeneric(obj, key);
} else {
AddInstruction(new(zone()) HCheckNonSmi(obj));
instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store);
}
} else if (expr->GetReceiverTypes() != NULL &&
!expr->GetReceiverTypes()->is_empty()) {
@ -8135,14 +8248,6 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
}
// Fast swapping of elements. Takes three expressions, the object and two
// indices. This should only be used if the indices are known to be
// non-negative and within bounds of the elements array at the call site.
void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
return Bailout("inlined runtime function: SwapElements");
}
// Fast call for custom callbacks.
void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// 1 ~ The function to call is not itself an argument to the call.

2
deps/v8/src/hydrogen.h

@ -267,6 +267,7 @@ class HGraph: public ZoneObject {
void AssignDominators();
void ReplaceCheckedValues();
void EliminateRedundantBoundsChecks();
void DehoistSimpleArrayIndexComputations();
void PropagateDeoptimizingMark();
// Returns false if there are phi-uses of the arguments-object
@ -1092,6 +1093,7 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
HValue* dependency,
Handle<Map> map,
bool is_store);
HValue* HandlePolymorphicElementAccess(HValue* object,

9
deps/v8/src/ia32/builtins-ia32.cc

@ -900,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -1003,7 +1003,8 @@ static void AllocateJSArray(MacroAssembler* masm,
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
__ LoadInitialArrayMap(array_function, scratch,
elements_array, fill_with_hole);
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
@ -1274,11 +1275,11 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&prepare_generic_code_call);
__ bind(&not_double);
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
__ mov(ebx, Operand(esp, 0));
__ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(
FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
edi,
eax,

38
deps/v8/src/ia32/code-stubs-ia32.cc

@ -3822,20 +3822,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->regexp_entry_native(), 1);
// Isolates: note we add an additional parameter here (isolate pointer).
static const int kRegExpExecuteArguments = 9;
__ EnterApiExitFrame(kRegExpExecuteArguments);
// Argument 9: Pass current isolate address.
__ mov(Operand(esp, 8 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
// Argument 7: Start (high end) of backtracking stack memory area.
__ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
__ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
__ mov(Operand(esp, 6 * kPointerSize), esi);
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
// Argument 5: static offsets vector buffer.
__ mov(Operand(esp, 4 * kPointerSize),
@ -3898,7 +3902,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(eax, 1);
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ j(equal, &success);
Label failure;
__ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
@ -7057,8 +7063,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
{ REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
{ REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
@ -7330,9 +7336,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ CheckFastElements(edi, &double_elements);
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
__ JumpIfSmi(eax, &smi_element);
__ CheckFastSmiElements(edi, &fast_elements, Label::kNear);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@ -7354,7 +7360,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ pop(edx);
__ jmp(&slow_elements);
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
@ -7367,15 +7373,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
OMIT_SMI_CHECK);
__ ret(0);
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ bind(&smi_element);
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
__ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
FixedArrayBase::kHeaderSize), eax);
__ ret(0);
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ push(edx);

4
deps/v8/src/ia32/codegen-ia32.cc

@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value

2
deps/v8/src/ia32/debug-ia32.cc

@ -175,7 +175,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Read current padding counter and skip corresponding number of words.
__ pop(unused_reg);
// We divide stored value by 2 (untagging) and multiply it by word's size.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
__ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
// Get rid of the internal frame.

109
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1649,7 +1649,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_constant_fast_elements =
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1660,7 +1661,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_ELEMENTS, the boilerplate cannot
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
@ -1672,10 +1673,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
// If the elements are already FAST_ELEMENTS, the boilerplate cannot
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@ -1703,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
VisitForAccumulatorValue(subexpr);
if (constant_elements_kind == FAST_ELEMENTS) {
// Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
// transition and don't need to call the runtime stub.
if (IsFastObjectElementsKind(constant_elements_kind)) {
// Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they
// cannot transition and don't need to call the runtime stub.
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ mov(ebx, Operand(esp, 0)); // Copy of array literal.
__ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
@ -3405,99 +3405,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
Label done;
Label slow_case;
Register object = eax;
Register index_1 = ebx;
Register index_2 = ecx;
Register elements = edi;
Register temp = edx;
__ mov(object, Operand(esp, 2 * kPointerSize));
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ CmpObjectType(object, JS_ARRAY_TYPE, temp);
__ j(not_equal, &slow_case);
__ test_b(FieldOperand(temp, Map::kBitFieldOffset),
KeyedLoadIC::kSlowCaseBitFieldMask);
__ j(not_zero, &slow_case);
// Check the object's elements are in fast case and writable.
__ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(isolate()->factory()->fixed_array_map()));
__ j(not_equal, &slow_case);
// Check that both indices are smis.
__ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
__ or_(temp, index_2);
__ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
__ cmp(temp, index_1);
__ j(below_equal, &slow_case);
__ cmp(temp, index_2);
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
__ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
__ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
// Swap elements. Use object and temp as scratch registers.
__ mov(object, Operand(index_1, 0));
__ mov(temp, Operand(index_2, 0));
__ mov(Operand(index_2, 0), object);
__ mov(Operand(index_1, 0), temp);
Label no_remembered_set;
__ CheckPageFlag(elements,
temp,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&no_remembered_set,
Label::kNear);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask.)
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index_1,
temp,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index_2,
temp,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ add(esp, Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
__ bind(&slow_case);
__ CallRuntime(Runtime::kSwapElements, 3);
__ bind(&done);
context()->Plug(eax);
}
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());
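
Taken together, the hunks above leave VisitArrayLiteral with a three-way choice: a specialized COW stub when the boilerplate already has copy-on-write fast object elements, the runtime call for literals too long to clone in a stub, and the generic clone stub otherwise. A hedged C++ model of just that decision (enum and names invented for illustration; the real logic is the code above):

    #include <iostream>

    enum class ClonePath { kCowStub, kRuntime, kCloneStub };

    // Illustrative only; mirrors the branch structure of VisitArrayLiteral.
    ClonePath ChoosePath(bool fast_object_elements, bool is_cow,
                         int length, int max_cloned_length) {
      if (fast_object_elements && is_cow) return ClonePath::kCowStub;
      if (length > max_cloned_length) return ClonePath::kRuntime;
      return ClonePath::kCloneStub;
    }

    int main() {
      // A short literal with copy-on-write FAST_*_ELEMENTS takes the COW stub.
      std::cout << (ChoosePath(true, true, 4, 8) == ClonePath::kCowStub) << "\n";
      // An oversized literal falls back to Runtime::kCreateArrayLiteralShallow.
      std::cout << (ChoosePath(false, false, 100, 8) == ClonePath::kRuntime) << "\n";
      return 0;
    }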

16
deps/v8/src/ia32/ic-ia32.cc

@ -889,25 +889,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
&non_double_value,
DONT_DO_SMI_CHECK);
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&slow);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@ -1622,7 +1622,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);

123
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -2377,8 +2377,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset));
__ and_(temp, Map::kElementsKindMask);
__ shr(temp, Map::kElementsKindShift);
__ cmp(temp, FAST_ELEMENTS);
__ j(equal, &ok, Label::kNear);
__ cmp(temp, GetInitialFastElementsKind());
__ j(less, &fail, Label::kNear);
__ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND);
__ j(less_equal, &ok, Label::kNear);
__ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
__ j(less, &fail, Label::kNear);
__ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
@ -2421,9 +2423,11 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ mov(result,
BuildFastArrayOperand(instr->elements(), instr->key(),
BuildFastArrayOperand(instr->elements(),
instr->key(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag));
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2437,18 +2441,24 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
FAST_DOUBLE_ELEMENTS,
offset);
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
FAST_DOUBLE_ELEMENTS,
offset,
instr->additional_index());
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
instr->elements(),
instr->key(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
__ movdbl(result, double_load_operand);
}
@ -2457,7 +2467,8 @@ Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
uint32_t offset) {
uint32_t offset,
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
@ -2466,10 +2477,14 @@ Operand LCodeGen::BuildFastArrayOperand(
Abort("array index constant value too big");
}
return Operand(elements_pointer_reg,
constant_value * (1 << shift_size) + offset);
((constant_value + additional_index) << shift_size)
+ offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
offset + (additional_index << shift_size));
}
}
@ -2478,7 +2493,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
instr->key(), elements_kind, 0));
instr->key(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
@ -2514,9 +2532,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3415,7 +3436,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Operand operand(BuildFastArrayOperand(instr->external_pointer(),
instr->key(), elements_kind, 0));
instr->key(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
@ -3439,9 +3463,12 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3456,31 +3483,21 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ mov(FieldOperand(elements, offset), value);
} else {
__ mov(FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize),
value);
}
Operand operand = BuildFastArrayOperand(
instr->object(),
instr->key(),
FAST_ELEMENTS,
FixedArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
__ mov(operand, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(!instr->key()->IsConstantOperand());
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ lea(key,
FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
__ lea(key, operand);
__ RecordWrite(elements,
key,
value,
@ -3508,8 +3525,11 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
instr->elements(),
instr->key(),
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
__ movdbl(double_store_operand, value);
}
@ -3540,22 +3560,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
__ mov(new_map_reg, to_map);
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register object_reg = ToRegister(instr->object());
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp_reg(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp_reg()), kDontSaveFPRegs);
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(edx));
ASSERT(new_map_reg.is(ebx));
@ -4415,8 +4436,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
@ -4578,8 +4600,9 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
// Load the map's "bit field 2". We only need the first byte,
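
The thread running through this file is the new additional_index argument to BuildFastArrayOperand: both addressing forms fold it into the displacement, so a keyed access at key + additional_index costs no extra instructions. A standalone sketch of the byte-offset arithmetic (registers reduced to plain integers; not V8 code):

    #include <cassert>
    #include <cstdint>

    // Constant key: ((key + additional_index) << shift) + base, as in the diff.
    uint32_t ConstantKeyOffset(uint32_t key, uint32_t additional_index,
                               int shift, uint32_t base) {
      return ((key + additional_index) << shift) + base;
    }

    // Register key: only the displacement is computed here; the key itself is
    // applied by the hardware scale factor (base + key * 2^shift + disp).
    uint32_t RegisterKeyDisplacement(uint32_t additional_index, int shift,
                                     uint32_t base) {
      return base + (additional_index << shift);
    }

    int main() {
      const int kShift = 2;        // 4-byte tagged elements on ia32
      const uint32_t kHeader = 8;  // stand-in for the FixedArray header size
      uint32_t key = 5, extra = 3;
      // Both forms must address the same element for the same logical index.
      assert(ConstantKeyOffset(key, extra, kShift, kHeader) ==
             key * 4 + RegisterKeyDisplacement(extra, kShift, kHeader));
      return 0;
    }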

3
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -242,7 +242,8 @@ class LCodeGen BASE_EMBEDDED {
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
uint32_t offset);
uint32_t offset,
uint32_t additional_index = 0);
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);

8
deps/v8/src/ia32/lithium-ia32.cc

@ -1990,8 +1990,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
key);
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, so make sure
// it has an environment.
@ -2093,8 +2092,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
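
A transition counts as "simple" when the backing store keeps its representation and only the map pointer needs rewriting: smi to object elements, or packed to the holey kind of the same family. A guessed model of the predicate, inferred from the kinds this diff routes through it (the real IsSimpleMapChangeTransition may differ in detail):

    #include <cassert>

    // Numbering mirrors the STATIC_ASSERTs elsewhere in this diff; the double
    // kinds' positions are an assumption made for illustration.
    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0, FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2, FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4, FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };

    bool IsFastSmiKind(ElementsKind k) {
      return k == FAST_SMI_ELEMENTS || k == FAST_HOLEY_SMI_ELEMENTS;
    }
    bool IsFastObjectKind(ElementsKind k) {
      return k == FAST_ELEMENTS || k == FAST_HOLEY_ELEMENTS;
    }

    // Simple == the tagged layout is preserved: packed -> holey in the same
    // family, or any smi kind -> any object kind.
    bool IsSimpleMapChangeTransitionModel(ElementsKind from, ElementsKind to) {
      bool packed_to_holey = (to == from + 1) && (from % 2 == 0);
      return packed_to_holey || (IsFastSmiKind(from) && IsFastObjectKind(to));
    }

    int main() {
      assert(IsSimpleMapChangeTransitionModel(FAST_SMI_ELEMENTS, FAST_ELEMENTS));
      assert(IsSimpleMapChangeTransitionModel(FAST_ELEMENTS, FAST_HOLEY_ELEMENTS));
      // Smi -> double changes the store to raw doubles and needs a full copy,
      // which is why the diff sends it to TransitionElementsSmiToDouble instead.
      assert(!IsSimpleMapChangeTransitionModel(FAST_SMI_ELEMENTS,
                                               FAST_DOUBLE_ELEMENTS));
      return 0;
    }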

12
deps/v8/src/ia32/lithium-ia32.h

@ -1238,13 +1238,13 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastDoubleElement(LOperand* elements,
LOperand* key) {
LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
@ -1255,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@ -1275,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1775,6 +1776,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1797,6 +1799,7 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -1822,6 +1825,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};

88
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -382,10 +382,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::CheckFastElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastElementValue);
Map::kMaximumBitField2FastHoleyElementValue);
j(above, fail, distance);
}
@ -393,23 +395,26 @@ void MacroAssembler::CheckFastElements(Register map,
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastSmiOnlyElementValue);
Map::kMaximumBitField2FastHoleySmiElementValue);
j(below_equal, fail, distance);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastElementValue);
Map::kMaximumBitField2FastHoleyElementValue);
j(above, fail, distance);
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
void MacroAssembler::CheckFastSmiElements(Register map,
Label* fail,
Label::Distance distance) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
cmpb(FieldOperand(map, Map::kBitField2Offset),
Map::kMaximumBitField2FastSmiOnlyElementValue);
Map::kMaximumBitField2FastHoleySmiElementValue);
j(above, fail, distance);
}
@ -493,24 +498,18 @@ void MacroAssembler::CompareMap(Register obj,
CompareMapMode mode) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(transitioned_double_map));
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind, NULL);
if (!current_map) break;
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(current_map));
}
}
}
}
@ -2161,27 +2160,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
mov(scratch, Operand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
cmp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
mov(map_in_out, FieldOperand(scratch, offset));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
mov(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
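
Because the four smi/object fast kinds are now contiguous (the STATIC_ASSERTs above pin them to 0 through 3), each CheckFast* helper reduces to one or two unsigned compares against boundaries encoded in the map's bit field 2. A sketch of the three range checks on bare kind values, with the boundaries standing in for the Map::kMaximumBitField2Fast*Value constants (not V8 code):

    #include <cassert>

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0, FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2, FAST_HOLEY_ELEMENTS = 3, FAST_DOUBLE_ELEMENTS = 4
    };

    // Any fast smi/object kind: one compare, as in CheckFastElements.
    bool CheckFastElements(ElementsKind k) { return k <= FAST_HOLEY_ELEMENTS; }

    // Object kinds only: two compares, as in CheckFastObjectElements.
    bool CheckFastObjectElements(ElementsKind k) {
      return k > FAST_HOLEY_SMI_ELEMENTS && k <= FAST_HOLEY_ELEMENTS;
    }

    // Smi kinds only: one compare, as in CheckFastSmiElements.
    bool CheckFastSmiElements(ElementsKind k) {
      return k <= FAST_HOLEY_SMI_ELEMENTS;
    }

    int main() {
      assert(CheckFastElements(FAST_HOLEY_ELEMENTS));
      assert(!CheckFastElements(FAST_DOUBLE_ELEMENTS));  // doubles jump to fail
      assert(CheckFastSmiElements(FAST_HOLEY_SMI_ELEMENTS));
      assert(!CheckFastObjectElements(FAST_SMI_ELEMENTS));
      return 0;
    }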

9
deps/v8/src/ia32/macro-assembler-ia32.h

@ -235,7 +235,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);
Register map_out,
bool can_have_holes);
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@ -357,9 +358,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);
void CheckFastSmiElements(Register map,
Label* fail,
Label::Distance distance = Label::kFar);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in

156
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -42,28 +42,30 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - edx : current character. Must be loaded using LoadCurrentCharacter
* before using any of the dispatch methods.
* - edi : current position in input, as negative offset from end of string.
* - edx : Current character. Must be loaded using LoadCurrentCharacter
* before using any of the dispatch methods. Temporarily stores the
* index of capture start after a matching pass for a global regexp.
* - edi : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - esi : end of input (points to byte after last character in input).
* - ebp : frame pointer. Used to access arguments, local variables and
* - ebp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - esp : points to tip of C stack.
* - ecx : points to tip of backtrack stack
* - esp : Points to tip of C stack.
* - ecx : Points to tip of backtrack stack
*
* The registers eax and ebx are free to use for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
* - Isolate* isolate (Address of the current isolate)
* - Isolate* isolate (address of the current isolate)
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
* - stack_area_base (high end of the memory area to use as
* backtracking stack)
* - capture array size (may fit multiple sets of matches)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - end of input (address of end of string)
* - start of input (address of first character in string)
* - start index (character index of start)
* - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
@ -72,9 +74,10 @@ namespace internal {
* - backup of caller esi
* - backup of caller edi
* - backup of caller ebx
* - success counter (only for global regexps to count matches).
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 0 ebp[-4] (only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
*
@ -706,13 +709,16 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
ASSERT(FAILURE == 0); // Return value for failure is zero.
__ Set(eax, Immediate(0));
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
__ Set(eax, Immediate(FAILURE));
}
__ jmp(&exit_label_);
}
Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label return_eax;
// Finalize code - write the entry point code now we know how many
// registers we need.
@ -731,6 +737,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(esi);
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Number of successful matches in a global regexp.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
@ -750,13 +757,13 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ mov(eax, EXCEPTION);
__ jmp(&exit_label_);
__ jmp(&return_eax);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
__ or_(eax, eax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &exit_label_);
__ j(not_zero, &return_eax);
__ bind(&stack_ok);
// Load start index for later use.
@ -783,19 +790,8 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
__ mov(ecx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, +0), eax);
__ sub(ecx, Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ j(greater, &init_loop);
}
// Ensure that we have written to each stack page, in order. Skipping a page
#ifdef WIN32
// Ensure that we write to each stack page, in order. Skipping a page
// on Windows can cause segmentation faults. Assuming page size is 4k.
const int kPageSize = 4096;
const int kRegistersPerPage = kPageSize / kPointerSize;
@ -804,20 +800,45 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
i += kRegistersPerPage) {
__ mov(register_location(i), eax); // One write every page.
}
#endif // WIN32
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
__ j(not_equal, &load_char_start_regexp, Label::kNear);
__ mov(current_character(), '\n');
__ jmp(&start_regexp, Label::kNear);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
__ mov(ecx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, 0), eax);
__ sub(ecx, Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ j(greater, &init_loop);
} else { // Unroll the loop.
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(register_location(i), eax);
}
}
}
// Initialize backtrack stack pointer.
__ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
// Load previous char as initial value of current-character.
Label at_start;
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
__ j(equal, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ mov(current_character(), '\n');
__ jmp(&start_label_);
__ jmp(&start_label_);
// Exit code:
if (success_label_.is_linked()) {
@ -836,6 +857,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
if (i == 0 && global()) {
// Keep capture start in edx for the zero-length check later.
__ mov(edx, eax);
}
// Convert to index from start of string, not end.
__ add(eax, ecx);
if (mode_ == UC16) {
@ -844,10 +869,54 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(Operand(ebx, i * kPointerSize), eax);
}
}
__ mov(eax, Immediate(SUCCESS));
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
__ inc(Operand(ebp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ mov(ecx, Operand(ebp, kNumOutputRegisters));
__ sub(ecx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
__ cmp(ecx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ mov(Operand(ebp, kNumOutputRegisters), ecx);
// Advance the location for output.
__ add(Operand(ebp, kRegisterOutput),
Immediate(num_saved_registers_ * kPointerSize));
// Prepare eax to initialize registers with its value in the next run.
__ mov(eax, Operand(ebp, kInputStartMinusOne));
// Special case for zero-length matches.
// edx: capture start index
__ cmp(edi, edx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// edi (offset from the end) is zero if we already reached the end.
__ test(edi, edi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
__ add(edi, Immediate(2));
} else {
__ inc(edi);
}
__ jmp(&load_char_start_regexp);
} else {
__ mov(eax, Immediate(SUCCESS));
}
}
// Exit and return eax
__ bind(&exit_label_);
if (global()) {
// Return the number of successful captures.
__ mov(eax, Operand(ebp, kSuccessfulCaptures));
}
__ bind(&return_eax);
// Skip esp past regexp registers.
__ lea(esp, Operand(ebp, kBackup_ebx));
// Restore callee-save registers.
@ -877,7 +946,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ or_(eax, eax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
__ j(not_zero, &return_eax);
__ pop(edi);
__ pop(backtrack_stackpointer());
@ -924,7 +993,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ mov(eax, EXCEPTION);
__ jmp(&exit_label_);
__ jmp(&return_eax);
}
CodeDesc code_desc;
@ -1043,8 +1112,9 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
}
void RegExpMacroAssemblerIA32::Succeed() {
bool RegExpMacroAssemblerIA32::Succeed() {
__ jmp(&success_label_);
return global();
}
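
The shape of the new global loop: on each success the generated code stores one capture set, increments kSuccessfulCaptures, shrinks the remaining output area, and restarts at load_char_start_regexp, stepping one character (two bytes in UC16 mode) past any zero-length match so it cannot spin forever. A plain C++ sketch of that control flow against a toy matcher; everything below is illustrative, not the generated code:

    #include <iostream>
    #include <string>
    #include <vector>

    // Toy stand-in for the compiled regexp body: matches the empty string
    // everywhere. Returns [start, end) on success.
    bool MatchOnce(const std::string& s, int from, int* start, int* end) {
      if (from > static_cast<int>(s.size())) return false;
      *start = *end = from;  // zero-length match
      return true;
    }

    int RunGlobal(const std::string& s, std::vector<int>* out, int max_matches) {
      int successes = 0, pos = 0, start = 0, end = 0;
      while (successes < max_matches && MatchOnce(s, pos, &start, &end)) {
        out->push_back(start);  // store this match's capture set
        out->push_back(end);
        ++successes;            // inc(Operand(ebp, kSuccessfulCaptures))
        if (end == start) {     // zero-length match: the cmp(edi, edx) path
          if (end == static_cast<int>(s.size())) break;  // already at the end
          pos = end + 1;        // advance before restarting
        } else {
          pos = end;
        }
      }
      return successes;         // global code returns the match count
    }

    int main() {
      std::vector<int> captures;
      // An empty pattern with /g over "ab" matches at offsets 0, 1 and 2.
      std::cout << RunGlobal("ab", &captures, 16) << "\n";  // prints 3
      return 0;
    }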

13
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -1,4 +1,4 @@
// Copyright 2008-2009 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -111,7 +111,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@ -135,7 +135,11 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kInputStart = kStartIndex + kPointerSize;
static const int kInputEnd = kInputStart + kPointerSize;
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
// For the case of global regular expression, we have room to store at least
// one set of capture results. For the case of non-global regexp, we ignore
// this value.
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer - local stack variables.
@ -144,7 +148,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_esi = kFramePointer - kPointerSize;
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;

8
deps/v8/src/ia32/simulator-ia32.h

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -40,12 +40,12 @@ namespace internal {
typedef int (*regexp_matcher)(String*, int, const byte*,
const byte*, int*, Address, int, Isolate*);
const byte*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address should
// expect nine int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \

27
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1462,16 +1462,31 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiOnlyElements(ebx, &call_builtin);
__ CheckFastSmiElements(ebx, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
ebx,
edi,
&try_holey_map);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
ebx,
edi,
&call_builtin);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
@ -3818,7 +3833,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(eax, &transition_elements_kind);
}
@ -3843,7 +3858,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ j(not_equal, &miss_force_generic);
__ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size
__ mov(FieldOperand(edi,
@ -3851,7 +3866,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
times_half_pointer_size,
FixedArray::kHeaderSize), eax);
} else {
ASSERT(elements_kind == FAST_ELEMENTS);
ASSERT(IsFastObjectElementsKind(elements_kind));
// Do the store and update the write barrier.
// ecx is a smi, use times_half_pointer_size instead of
// times_pointer_size

65
deps/v8/src/ic.cc

@ -1644,8 +1644,7 @@ Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
return string_stub();
} else {
ASSERT(receiver_map->has_dictionary_elements() ||
receiver_map->has_fast_elements() ||
receiver_map->has_fast_smi_only_elements() ||
receiver_map->has_fast_smi_or_object_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@ -1660,8 +1659,7 @@ Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
StubKind stub_kind,
StrictModeFlag strict_mode,
Handle<Code> generic_stub) {
if (receiver->HasFastElements() ||
receiver->HasFastSmiOnlyElements() ||
if (receiver->HasFastSmiOrObjectElements() ||
receiver->HasExternalArrayElements() ||
receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
@ -1681,15 +1679,26 @@ Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
break;
case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
break;
default:
case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_ELEMENTS);
case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_DOUBLE_ELEMENTS);
case KeyedIC::LOAD:
case KeyedIC::STORE_NO_TRANSITION:
case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
UNREACHABLE();
return Handle<Map>::null();
break;
}
return Handle<Map>::null();
}
@ -1749,30 +1758,54 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
if (allow_growth) {
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiOnlyElements()) {
if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
if (receiver->HasFastHoleyElements()) {
return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
} else {
return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
}
}
if (value->IsHeapObject()) {
return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
if (receiver->HasFastHoleyElements()) {
return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
} else {
return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
}
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
if (receiver->HasFastHoleyElements()) {
return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
} else {
return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
}
}
}
return STORE_AND_GROW_NO_TRANSITION;
} else {
// Handle only in-bounds elements accesses.
if (receiver->HasFastSmiOnlyElements()) {
if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
return STORE_TRANSITION_SMI_TO_DOUBLE;
if (receiver->HasFastHoleyElements()) {
return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
} else {
return STORE_TRANSITION_SMI_TO_DOUBLE;
}
} else if (value->IsHeapObject()) {
return STORE_TRANSITION_SMI_TO_OBJECT;
if (receiver->HasFastHoleyElements()) {
return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
} else {
return STORE_TRANSITION_SMI_TO_OBJECT;
}
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
return STORE_TRANSITION_DOUBLE_TO_OBJECT;
if (receiver->HasFastHoleyElements()) {
return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
} else {
return STORE_TRANSITION_DOUBLE_TO_OBJECT;
}
}
}
return STORE_NO_TRANSITION;
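
Every transitioning stub kind now comes in a packed and a holey flavour, chosen from the receiver's current holeyness so that the target kind preserves any holes. The selection collapses to a small table; a hedged model with booleans standing in for the receiver and value checks in GetStubKind above:

    #include <cassert>
    #include <string>

    std::string StubKindModel(bool smi_elements, bool double_elements,
                              bool holey, bool value_is_heap_number,
                              bool value_is_heap_object) {
      if (smi_elements) {
        // Check heap numbers first: they are heap objects too.
        if (value_is_heap_number)
          return holey ? "TRANSITION_HOLEY_SMI_TO_DOUBLE"
                       : "TRANSITION_SMI_TO_DOUBLE";
        if (value_is_heap_object)
          return holey ? "TRANSITION_HOLEY_SMI_TO_OBJECT"
                       : "TRANSITION_SMI_TO_OBJECT";
      } else if (double_elements) {
        // Only a heap object that is not a heap number forces object elements.
        if (value_is_heap_object && !value_is_heap_number)
          return holey ? "TRANSITION_HOLEY_DOUBLE_TO_OBJECT"
                       : "TRANSITION_DOUBLE_TO_OBJECT";
      }
      return "NO_TRANSITION";
    }

    int main() {
      // A heap number stored into holey smi elements picks the holey double stub.
      assert(StubKindModel(true, false, true, true, false) ==
             "TRANSITION_HOLEY_SMI_TO_DOUBLE");
      // A smi value fits everywhere: no transition needed.
      assert(StubKindModel(true, false, false, false, false) == "NO_TRANSITION");
      return 0;
    }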

8
deps/v8/src/ic.h

@ -378,10 +378,16 @@ class KeyedIC: public IC {
STORE_TRANSITION_SMI_TO_OBJECT,
STORE_TRANSITION_SMI_TO_DOUBLE,
STORE_TRANSITION_DOUBLE_TO_OBJECT,
STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
STORE_AND_GROW_NO_TRANSITION,
STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
};
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -

24
deps/v8/src/incremental-marking-inl.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -118,13 +118,29 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
WhiteToGrey(obj, mark_bit);
Marking::WhiteToGrey(mark_bit);
marking_deque_.PushGrey(obj);
}
void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
Marking::WhiteToGrey(mark_bit);
bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
WhiteToGreyAndPush(obj, mark_bit);
return true;
}
return false;
}
bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
mark_bit.Set();
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
return true;
}
return false;
}
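
Read together, the two helpers encode the tri-colour discipline: MarkObjectAndPush turns a white object grey (bit set, pushed for a later field scan), while MarkObjectWithoutPush goes straight to black (bit set, live bytes accounted, nothing queued) for objects whose fields the caller visits itself. A self-contained model with a single bit per object, which simplifies V8's real white/grey/black bitmap:

    #include <cassert>
    #include <deque>

    struct Obj { bool mark_bit = false; int size = 16; };

    std::deque<Obj*> marking_deque;  // grey objects awaiting a field scan
    long live_bytes = 0;

    // White -> grey: set the bit and queue the object.
    bool MarkObjectAndPush(Obj* o) {
      if (o->mark_bit) return false;  // already grey or black: no work needed
      o->mark_bit = true;
      marking_deque.push_back(o);
      return true;
    }

    // White -> black: set the bit and account the size immediately.
    bool MarkObjectWithoutPush(Obj* o) {
      if (o->mark_bit) return false;
      o->mark_bit = true;
      live_bytes += o->size;
      return true;
    }

    int main() {
      Obj a, b;
      assert(MarkObjectAndPush(&a));
      assert(!MarkObjectAndPush(&a));  // second call reports no work, as above
      assert(MarkObjectWithoutPush(&b));
      assert(live_bytes == 16 && marking_deque.size() == 1);
      return 0;
    }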

41
deps/v8/src/incremental-marking.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -42,6 +42,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
state_(STOPPED),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(false),
marker_(this, heap->mark_compact_collector()),
steps_count_(0),
steps_took_(0),
longest_step_(0.0),
@ -663,6 +664,22 @@ void IncrementalMarking::Hurry() {
} else if (map == global_context_map) {
// Global contexts have weak fields.
VisitGlobalContext(Context::cast(obj), &marking_visitor);
} else if (map->instance_type() == MAP_TYPE) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's
// transitions and back pointers in a special way to make these links
// weak. Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
marking_visitor.VisitPointers(
HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
} else {
obj->Iterate(&marking_visitor);
}
@ -807,12 +824,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
Map* map = obj->map();
if (map == filler_map) continue;
if (obj->IsMap()) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
}
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@ -830,6 +841,22 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
VisitGlobalContext(ctx, &marking_visitor);
} else if (map->instance_type() == MAP_TYPE) {
Map* map = Map::cast(obj);
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's
// transitions and back pointers in a special way to make these links
// weak. Only maps for subclasses of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
marking_visitor.VisitPointers(
HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
}
} else if (map->instance_type() == JS_FUNCTION_TYPE) {
marking_visitor.VisitPointers(
HeapObject::RawField(obj, JSFunction::kPropertiesOffset),

15
deps/v8/src/incremental-marking.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -154,8 +154,6 @@ class IncrementalMarking {
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
// Does white->black or keeps gray or black color. Returns true if converting
// white to black.
inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@ -169,6 +167,16 @@ class IncrementalMarking {
return true;
}
// Marks the object grey and pushes it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for incremental marking only.
INLINE(bool MarkObjectAndPush(HeapObject* obj));
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for incremental marking only.
INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
inline int steps_count() {
return steps_count_;
}
@ -260,6 +268,7 @@ class IncrementalMarking {
VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
Marker<IncrementalMarking> marker_;
int steps_count_;
double steps_took_;

2
deps/v8/src/isolate.h

@ -965,7 +965,7 @@ class Isolate {
// SerializerDeserializer state.
static const int kPartialSnapshotCacheCapacity = 1400;
static const int kJSRegexpStaticOffsetsVectorSize = 50;
static const int kJSRegexpStaticOffsetsVectorSize = 128;
Address external_callback() {
return thread_local_top_.external_callback_;

28
deps/v8/src/jsregexp.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -324,7 +324,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
index)));
if (index == -1) return isolate->factory()->null_value();
}
ASSERT(last_match_info->HasFastElements());
ASSERT(last_match_info->HasFastObjectElements());
{
NoHandleAllocation no_handles;
@ -429,6 +429,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
RegExpEngine::CompilationResult result =
RegExpEngine::Compile(&compile_data,
flags.is_ignore_case(),
flags.is_global(),
flags.is_multiline(),
pattern,
sample_subject,
@ -515,7 +516,23 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
}
RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
int RegExpImpl::GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
int registers_per_match,
int* max_matches) {
#ifdef V8_INTERPRETED_REGEXP
// Global loop in interpreted regexp is not implemented. Therefore we choose
// the size of the offsets vector so that it can only store one match.
*max_matches = 1;
return registers_per_match;
#else // V8_INTERPRETED_REGEXP
int size = Max(registers_per_match, OffsetsVector::kStaticOffsetsVectorSize);
*max_matches = size / registers_per_match;
return size;
#endif // V8_INTERPRETED_REGEXP
}
int RegExpImpl::IrregexpExecRaw(
Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
@ -617,7 +634,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
OffsetsVector registers(required_registers, isolate);
IrregexpResult res = RegExpImpl::IrregexpExecOnce(
int res = RegExpImpl::IrregexpExecRaw(
jsregexp, subject, previous_index, Vector<int>(registers.vector(),
registers.length()));
if (res == RE_SUCCESS) {
@ -5780,6 +5797,7 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
RegExpEngine::CompilationResult RegExpEngine::Compile(
RegExpCompileData* data,
bool ignore_case,
bool is_global,
bool is_multiline,
Handle<String> pattern,
Handle<String> sample_subject,
@ -5883,6 +5901,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
macro_assembler.SetCurrentPositionFromEnd(max_length);
}
macro_assembler.set_global(is_global);
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,
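
GlobalOffsetsVectorSize sizes the shared offsets vector so that one call into generated code can report several matches. Worked example: a pattern with two capture groups uses (1 + 2) * 2 = 6 registers per match (the whole match plus each capture, two slots each), so the 128-entry static vector introduced by this commit holds 128 / 6 = 21 matches per call. A sketch of the non-interpreted branch:

    #include <algorithm>
    #include <cassert>

    // Mirrors the #else branch of GlobalOffsetsVectorSize above.
    int GlobalOffsetsVectorSize(int registers_per_match, int* max_matches) {
      const int kStaticOffsetsVectorSize = 128;  // raised from 50 in this commit
      int size = std::max(registers_per_match, kStaticOffsetsVectorSize);
      *max_matches = size / registers_per_match;
      return size;
    }

    int main() {
      int max_matches = 0;
      // Two capture groups: 6 registers per match -> 21 matches per call.
      assert(GlobalOffsetsVectorSize(6, &max_matches) == 128 && max_matches == 21);
      // A pattern needing more than 128 registers still fits one match.
      assert(GlobalOffsetsVectorSize(300, &max_matches) == 300 && max_matches == 1);
      return 0;
    }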

26
deps/v8/src/jsregexp.h

@ -109,16 +109,22 @@ class RegExpImpl {
static int IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject);
// Execute a regular expression once on the subject, starting from
// character "index".
// If successful, returns RE_SUCCESS and set the capture positions
// in the first registers.
// Calculate the size of the offsets vector for a global regexp and the
// number of matches this vector is able to store.
static int GlobalOffsetsVectorSize(Handle<JSRegExp> regexp,
int registers_per_match,
int* max_matches);
// Execute a regular expression on the subject, starting from index.
// If matching succeeds, return the number of matches. This can be larger
// than one in the case of global regular expressions.
// The captures and subcaptures are stored into the registers vector.
// If matching fails, returns RE_FAILURE.
// If execution fails, sets a pending exception and returns RE_EXCEPTION.
static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int> registers);
static int IrregexpExecRaw(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int> registers);
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing
@ -1545,6 +1551,7 @@ class RegExpEngine: public AllStatic {
static CompilationResult Compile(RegExpCompileData* input,
bool ignore_case,
bool global,
bool multiline,
Handle<String> pattern,
Handle<String> sample_subject,
@ -1573,7 +1580,8 @@ class OffsetsVector {
inline int* vector() { return vector_; }
inline int length() { return offsets_vector_length_; }
static const int kStaticOffsetsVectorSize = 50;
static const int kStaticOffsetsVectorSize =
Isolate::kJSRegexpStaticOffsetsVectorSize;
private:
static Address static_offsets_vector_address(Isolate* isolate) {

7
deps/v8/src/lithium.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -225,9 +225,12 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
return 2;
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
return 3;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
return kPointerSizeLog2;
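
The shift size is just log2 of the element width, so every keyed access reduces to base + (index << shift); the new holey cases can simply join their packed counterparts because holeyness never changes the element width. A tiny sketch of that mapping (the family names are invented stand-ins for the switch's case groups):

    #include <cassert>

    enum KindFamily { BYTE_WIDE, SHORT_WIDE, WORD_WIDE, DOUBLE_WIDE };

    // log2 of the element width, mirroring ElementsKindToShiftSize above.
    int ShiftSize(KindFamily f) {
      switch (f) {
        case BYTE_WIDE:   return 0;  // external byte/pixel arrays
        case SHORT_WIDE:  return 1;  // external 16-bit arrays
        case WORD_WIDE:   return 2;  // 32-bit, float and tagged (ia32) elements
        case DOUBLE_WIDE: return 3;  // FAST_*_DOUBLE_ELEMENTS, external doubles
      }
      return -1;
    }

    int main() {
      // Byte offset of element 5 in a double array: 5 << 3 == 40.
      assert((5 << ShiftSize(DOUBLE_WIDE)) == 40);
      // Tagged elements on ia32 use kPointerSizeLog2 == 2: 5 << 2 == 20.
      assert((5 << ShiftSize(WORD_WIDE)) == 20);
      return 0;
    }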

28
deps/v8/src/mark-compact-inl.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -52,6 +52,15 @@ void MarkCompactCollector::SetFlags(int flags) {
}
bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
if (MarkObjectWithoutPush(obj)) {
marking_deque_.PushBlack(obj);
return true;
}
return false;
}
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
if (!mark_bit.Get()) {
@ -62,16 +71,13 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
}
bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
MarkBit mark = Marking::MarkBitFrom(object);
bool old_mark = mark.Get();
if (!old_mark) SetMark(object, mark);
return old_mark;
}
void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
if (!mark_bit.Get()) {
SetMark(obj, mark_bit);
return true;
}
return false;
}
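(Both helpers now report whether the object still needed marking, so callers
can key first-visit work off the return value; a sketch, with `collector`
and `obj` as stand-ins:)
  if (collector->MarkObjectAndPush(obj)) {
    // First visit: obj is now black and queued on the marking deque.
  } else {
    // Already marked: nothing happens and nothing is pushed again.
  }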

158
deps/v8/src/mark-compact.cc

@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
abort_incremental_marking_(false),
compacting_(false),
was_marked_incrementally_(false),
collect_maps_(FLAG_collect_maps),
flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
encountered_weak_maps_(NULL) { }
encountered_weak_maps_(NULL),
marker_(this, this) { }
#ifdef DEBUG
@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
if (collect_maps_) ClearNonLiveTransitions();
if (FLAG_collect_maps) ClearNonLiveTransitions();
ClearWeakMaps();
@ -294,7 +294,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepSpaces();
if (!collect_maps_) ReattachInitialMaps();
if (!FLAG_collect_maps) ReattachInitialMaps();
Finish();
@ -658,11 +658,6 @@ void MarkCompactCollector::AbortCompaction() {
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
// Disable collection of maps if incremental marking is enabled.
// Map collection algorithm relies on a special map transition tree traversal
// order which is not implemented for incremental marking.
collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
// Monomorphic ICs are preserved when possible, but need to be flushed
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
@ -1798,11 +1793,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
// in a special way to make transition links weak.
// Only maps for subclasses of JSReceiver can have transitions.
// in a special way to make transition links weak. Only maps for subclasses
// of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
MarkMapContents(map);
if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
marker_.MarkMapContents(map);
} else {
marking_deque_.PushBlack(map);
}
@ -1812,85 +1807,86 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
}
void MarkCompactCollector::MarkMapContents(Map* map) {
// Force instantiation of template instances.
template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
template <class T>
void Marker<T>::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
// transitions in ClearNonLiveTransitions. But make sure that back pointers
// stored inside prototype transitions arrays are marked.
Object* raw_proto_transitions = map->unchecked_prototype_transitions();
if (raw_proto_transitions->IsFixedArray()) {
FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions);
// transitions in ClearNonLiveTransitions.
Object** proto_trans_slot =
HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
if (prototype_transitions->IsFixedArray()) {
mark_compact_collector()->RecordSlot(proto_trans_slot,
proto_trans_slot,
prototype_transitions);
MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
if (!mark.Get()) {
mark.Set();
MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
prototype_transitions->Size());
MarkObjectAndPush(HeapObject::cast(
prototype_transitions->get(Map::kProtoTransitionBackPointerOffset)));
}
}
Object** raw_descriptor_array_slot =
// Make sure that the back pointer stored either in the map itself or inside
// its prototype transitions array is marked. Treat pointers in the descriptor
// array as weak and also mark that array to prevent visiting it later.
base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
Object** descriptor_array_slot =
HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
Object* raw_descriptor_array = *raw_descriptor_array_slot;
if (!raw_descriptor_array->IsSmi()) {
MarkDescriptorArray(
reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
Object* descriptor_array = *descriptor_array_slot;
if (!descriptor_array->IsSmi()) {
MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
}
// Mark the Object* fields of the Map. Since the descriptor array has been
// marked already, it is fine that one of these fields contains a pointer
// to it. But make sure to skip the back pointer and prototype transitions.
STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
Object** start_slot = HeapObject::RawField(
map, Map::kPointerFieldsBeginOffset);
Object** end_slot = HeapObject::RawField(
map, Map::kPrototypeTransitionsOrBackPointerOffset);
for (Object** slot = start_slot; slot < end_slot; slot++) {
Object* obj = *slot;
if (!obj->NonFailureIsHeapObject()) continue;
mark_compact_collector()->RecordSlot(start_slot, slot, obj);
base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
}
// Mark the Object* fields of the Map.
// Since the descriptor array has been marked already, it is fine
// that one of these fields contains a pointer to it.
Object** start_slot = HeapObject::RawField(map,
Map::kPointerFieldsBeginOffset);
Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
}
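(The weak treatment above comes entirely from the push/no-push split; a
sketch of the contract, with `transitions` and `back_pointer` as stand-in
names:)
  // Black but never pushed: the array itself survives, yet objects it
  // references are not kept alive through it -- the links stay weak.
  base_marker()->MarkObjectWithoutPush(transitions);
  // Black and pushed: it will be scanned, so its references are strong.
  base_marker()->MarkObjectAndPush(back_pointer);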
void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
int offset) {
Object** slot = HeapObject::RawField(accessors, offset);
HeapObject* accessor = HeapObject::cast(*slot);
if (accessor->IsMap()) return;
RecordSlot(slot, slot, accessor);
MarkObjectAndPush(accessor);
}
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
if (descriptors_mark.Get()) return;
template <class T>
void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
// Empty descriptor array is marked as a root before any maps are marked.
ASSERT(descriptors != heap()->empty_descriptor_array());
SetMark(descriptors, descriptors_mark);
ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
FixedArray* contents = reinterpret_cast<FixedArray*>(
// The DescriptorArray contains a pointer to its contents array, but the
// contents array will be marked black and hence not be visited again.
if (!base_marker()->MarkObjectAndPush(descriptors)) return;
FixedArray* contents = FixedArray::cast(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
ASSERT(!IsMarked(contents));
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
MarkBit contents_mark = Marking::MarkBitFrom(contents);
SetMark(contents, contents_mark);
// Contents contains (value, details) pairs. If the details say that the type
// of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
// EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
// live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
// CONSTANT_TRANSITION is the value an Object* (a Map*).
ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
base_marker()->MarkObjectWithoutPush(contents);
// Contents contains (value, details) pairs. If the descriptor contains a
// transition (value is a Map), we don't mark the value as live. It might
// be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
for (int i = 0; i < contents->length(); i += 2) {
// If the pair (value, details) at index i, i+1 is not
// a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
if (!(*slot)->IsHeapObject()) continue;
HeapObject* value = HeapObject::cast(*slot);
RecordSlot(slot, slot, *slot);
mark_compact_collector()->RecordSlot(slot, slot, *slot);
switch (details.type()) {
case NORMAL:
@ -1898,21 +1894,22 @@ void MarkCompactCollector::MarkDescriptorArray(
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
MarkObjectAndPush(value);
base_marker()->MarkObjectAndPush(value);
break;
case CALLBACKS:
if (!value->IsAccessorPair()) {
MarkObjectAndPush(value);
} else if (!MarkObjectWithoutPush(value)) {
MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
base_marker()->MarkObjectAndPush(value);
} else if (base_marker()->MarkObjectWithoutPush(value)) {
AccessorPair* accessors = AccessorPair::cast(value);
MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
}
break;
case ELEMENTS_TRANSITION:
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
if (value->IsFixedArray()) MarkObjectWithoutPush(value);
if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
@ -1920,9 +1917,16 @@ void MarkCompactCollector::MarkDescriptorArray(
break;
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
// but the contents array is already marked.
marking_deque_.PushBlack(descriptors);
}
template <class T>
void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
Object** slot = HeapObject::RawField(accessors, offset);
HeapObject* accessor = HeapObject::cast(*slot);
if (accessor->IsMap()) return;
mark_compact_collector()->RecordSlot(slot, slot, accessor);
base_marker()->MarkObjectAndPush(accessor);
}
@ -2734,7 +2738,9 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
// We have to zap this pointer, because the store buffer may overflow later,
// and then we have to scan the entire heap and we don't want to find
// spurious newspace pointers in the old space.
*p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
// TODO(mstarzinger): This was changed to a sentinel value to track down
// rare crashes, change it back to Smi::FromInt(0) later.
*p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
}
}

57
deps/v8/src/mark-compact.h

@ -42,6 +42,7 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
@ -166,7 +167,6 @@ class Marking {
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
public:
MarkingDeque()
@ -383,6 +383,34 @@ class SlotsBuffer {
};
// -------------------------------------------------------------------------
// Marker shared between incremental and non-incremental marking
template<class BaseMarker> class Marker {
public:
Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
: base_marker_(base_marker),
mark_compact_collector_(mark_compact_collector) {}
// Mark pointers in a Map and its DescriptorArray together, possibly
// treating transitions or back pointers as weak.
void MarkMapContents(Map* map);
void MarkDescriptorArray(DescriptorArray* descriptors);
void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
private:
BaseMarker* base_marker() {
return base_marker_;
}
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_;
}
BaseMarker* base_marker_;
MarkCompactCollector* mark_compact_collector_;
};
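(A usage sketch of the template, mirroring the marker_(this, this) member
the collector gains in mark-compact.cc; the incremental side is expected to
instantiate Marker<IncrementalMarking> the same way:)
  Marker<MarkCompactCollector> marker(collector, collector);
  marker.MarkMapContents(map);  // back pointer strong, transitions weak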
// Defined in isolate.h.
class ThreadLocalTop;
@ -584,8 +612,6 @@ class MarkCompactCollector {
bool was_marked_incrementally_;
bool collect_maps_;
bool flush_monomorphic_ics_;
// A pointer to the current stack-allocated GC tracer object during a full
@ -608,12 +634,13 @@ class MarkCompactCollector {
//
// After: Live objects are marked and non-live objects are unmarked.
friend class RootMarkingVisitor;
friend class MarkingVisitor;
friend class StaticMarkingVisitor;
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
friend class Marker<IncrementalMarking>;
friend class Marker<MarkCompactCollector>;
// Mark non-optimize code for functions inlined into the given optimized
// code. This will prevent it from being flushed.
@ -631,22 +658,25 @@ class MarkCompactCollector {
void AfterMarking();
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking.
// Returns true if object needed marking and false otherwise.
// This is for non-incremental marking only.
INLINE(bool MarkObjectAndPush(HeapObject* obj));
// Marks the object black and pushes it on the marking stack.
// This is for non-incremental marking only.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
INLINE(bool MarkObjectWithoutPush(HeapObject* object));
INLINE(void MarkObjectAndPush(HeapObject* value));
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
// This is for non-incremental marking only.
INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
// Marks the object black. This is for non-incremental marking.
// Marks the object black assuming that it is not yet marked.
// This is for non-incremental marking only.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
void ProcessNewlyMarkedObject(HeapObject* obj);
// Mark a Map and its DescriptorArray together, skipping transitions.
void MarkMapContents(Map* map);
void MarkAccessorPairSlot(HeapObject* accessors, int offset);
void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
@ -749,6 +779,7 @@ class MarkCompactCollector {
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
Marker<MarkCompactCollector> marker_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;

183
deps/v8/src/messages.js

@ -61,18 +61,21 @@ function FormatString(format, message) {
// To check if something is a native error we need to check the
// concrete native error types. It is not enough to check "obj
// instanceof $Error" because user code can replace
// NativeError.prototype.__proto__. User code cannot replace
// NativeError.prototype though and therefore this is a safe test.
// concrete native error types. It is not sufficient to use instanceof
// since it is possible to create an object that has Error.prototype on
// its prototype chain. This is the case for DOMException for example.
function IsNativeErrorObject(obj) {
return (obj instanceof $Error) ||
(obj instanceof $EvalError) ||
(obj instanceof $RangeError) ||
(obj instanceof $ReferenceError) ||
(obj instanceof $SyntaxError) ||
(obj instanceof $TypeError) ||
(obj instanceof $URIError);
switch (%_ClassOf(obj)) {
case 'Error':
case 'EvalError':
case 'RangeError':
case 'ReferenceError':
case 'SyntaxError':
case 'TypeError':
case 'URIError':
return true;
}
return false;
}
@ -745,7 +748,7 @@ function GetPositionInLine(message) {
function GetStackTraceLine(recv, fun, pos, isGlobal) {
return FormatSourcePosition(new CallSite(recv, fun, pos));
return new CallSite(recv, fun, pos).toString();
}
// ----------------------------------------------------------------------------
@ -785,15 +788,7 @@ function CallSiteGetThis() {
}
function CallSiteGetTypeName() {
var constructor = this.receiver.constructor;
if (!constructor) {
return %_CallFunction(this.receiver, ObjectToString);
}
var constructorName = constructor.name;
if (!constructorName) {
return %_CallFunction(this.receiver, ObjectToString);
}
return constructorName;
return GetTypeName(this, false);
}
function CallSiteIsToplevel() {
@ -827,8 +822,10 @@ function CallSiteGetFunctionName() {
var name = this.fun.name;
if (name) {
return name;
} else {
return %FunctionGetInferredName(this.fun);
}
name = %FunctionGetInferredName(this.fun);
if (name) {
return name;
}
// Maybe this is an evaluation?
var script = %FunctionGetScript(this.fun);
@ -919,6 +916,69 @@ function CallSiteIsConstructor() {
return this.fun === constructor;
}
function CallSiteToString() {
var fileName;
var fileLocation = "";
if (this.isNative()) {
fileLocation = "native";
} else if (this.isEval()) {
fileName = this.getScriptNameOrSourceURL();
if (!fileName) {
fileLocation = this.getEvalOrigin();
}
} else {
fileName = this.getFileName();
}
if (fileName) {
fileLocation += fileName;
var lineNumber = this.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
var columnNumber = this.getColumnNumber();
if (columnNumber) {
fileLocation += ":" + columnNumber;
}
}
}
if (!fileLocation) {
fileLocation = "unknown source";
}
var line = "";
var functionName = this.getFunctionName();
var addSuffix = true;
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
var typeName = GetTypeName(this, true);
var methodName = this.getMethodName();
if (functionName) {
if (typeName && functionName.indexOf(typeName) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName && functionName.lastIndexOf("." + methodName) !=
functionName.length - methodName.length - 1) {
line += " [as " + methodName + "]";
}
} else {
line += typeName + "." + (methodName || "<anonymous>");
}
} else if (isConstructor) {
line += "new " + (functionName || "<anonymous>");
} else if (functionName) {
line += functionName;
} else {
line += fileLocation;
addSuffix = false;
}
if (addSuffix) {
line += " (" + fileLocation + ")";
}
return line;
}
SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getThis", CallSiteGetThis,
"getTypeName", CallSiteGetTypeName,
@ -934,7 +994,8 @@ SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
"getColumnNumber", CallSiteGetColumnNumber,
"isNative", CallSiteIsNative,
"getPosition", CallSiteGetPosition,
"isConstructor", CallSiteIsConstructor
"isConstructor", CallSiteIsConstructor,
"toString", CallSiteToString
));
@ -976,65 +1037,6 @@ function FormatEvalOrigin(script) {
return eval_origin;
}
function FormatSourcePosition(frame) {
var fileName;
var fileLocation = "";
if (frame.isNative()) {
fileLocation = "native";
} else if (frame.isEval()) {
fileName = frame.getScriptNameOrSourceURL();
if (!fileName) {
fileLocation = frame.getEvalOrigin();
}
} else {
fileName = frame.getFileName();
}
if (fileName) {
fileLocation += fileName;
var lineNumber = frame.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
var columnNumber = frame.getColumnNumber();
if (columnNumber) {
fileLocation += ":" + columnNumber;
}
}
}
if (!fileLocation) {
fileLocation = "unknown source";
}
var line = "";
var functionName = frame.getFunction().name;
var addPrefix = true;
var isConstructor = frame.isConstructor();
var isMethodCall = !(frame.isToplevel() || isConstructor);
if (isMethodCall) {
var methodName = frame.getMethodName();
line += frame.getTypeName() + ".";
if (functionName) {
line += functionName;
if (methodName && (methodName != functionName)) {
line += " [as " + methodName + "]";
}
} else {
line += methodName || "<anonymous>";
}
} else if (isConstructor) {
line += "new " + (functionName || "<anonymous>");
} else if (functionName) {
line += functionName;
} else {
line += fileLocation;
addPrefix = false;
}
if (addPrefix) {
line += " (" + fileLocation + ")";
}
return line;
}
function FormatStackTrace(error, frames) {
var lines = [];
try {
@ -1050,7 +1052,7 @@ function FormatStackTrace(error, frames) {
var frame = frames[i];
var line;
try {
line = FormatSourcePosition(frame);
line = frame.toString();
} catch (e) {
try {
line = "<error: " + e + ">";
@ -1081,6 +1083,19 @@ function FormatRawStackTrace(error, raw_stack) {
}
}
function GetTypeName(obj, requireConstructor) {
var constructor = obj.receiver.constructor;
if (!constructor) {
return requireConstructor ? null :
%_CallFunction(obj.receiver, ObjectToString);
}
var constructorName = constructor.name;
if (!constructorName) {
return requireConstructor ? null :
%_CallFunction(obj.receiver, ObjectToString);
}
return constructorName;
}
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;

9
deps/v8/src/mips/builtins-mips.cc

@ -118,7 +118,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
__ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@ -214,7 +214,8 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
__ LoadInitialArrayMap(array_function, scratch2,
elements_array_storage, fill_with_hole);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ Assert(
@ -449,10 +450,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ Branch(call_generic_code);
__ bind(&not_double);
// Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
// Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
// a3: JSArray
__ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
a2,
t5,

38
deps/v8/src/mips/code-stubs-mips.cc

@ -5043,7 +5043,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 8;
const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
@ -5054,27 +5054,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// allocating space for the c argument slots, we don't need to calculate
// that into the argument positions on the stack. This is how the stack will
// look (sp meaning the value of sp at this moment):
// [sp + 5] - Argument 9
// [sp + 4] - Argument 8
// [sp + 3] - Argument 7
// [sp + 2] - Argument 6
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
// Argument 8: Pass current isolate address.
// Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
__ li(a0, Operand(ExternalReference::isolate_address()));
__ sw(a0, MemOperand(sp, 4 * kPointerSize));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
// Argument 7: Indicate that this is a direct call from JavaScript.
// Argument 8: Indicate that this is a direct call from JavaScript.
__ li(a0, Operand(1));
__ sw(a0, MemOperand(sp, 3 * kPointerSize));
__ sw(a0, MemOperand(sp, 4 * kPointerSize));
// Argument 6: Start (high end) of backtracking stack memory area.
// Argument 7: Start (high end) of backtracking stack memory area.
__ li(a0, Operand(address_of_regexp_stack_memory_address));
__ lw(a0, MemOperand(a0, 0));
__ li(a2, Operand(address_of_regexp_stack_memory_size));
__ lw(a2, MemOperand(a2, 0));
__ addu(a0, a0, a2);
__ sw(a0, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(a0, zero_reg);
__ sw(a0, MemOperand(sp, 2 * kPointerSize));
// Argument 5: static offsets vector buffer.
@ -5125,7 +5131,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ Branch(&success, eq, v0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
Label failure;
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not exception it can only be retry. Handle that in the runtime system.
@ -7362,8 +7370,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// KeyedStoreStubCompiler::GenerateStoreFastElement.
{ REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
// ElementsTransitionGenerator::GenerateSmiOnlyToObject
// and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
// ElementsTransitionGenerator::GenerateMapChangeElementTransition
// and ElementsTransitionGenerator::GenerateSmiToDouble
// and ElementsTransitionGenerator::GenerateDoubleToObject
{ REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
{ REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
@ -7629,9 +7637,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
Label fast_elements;
__ CheckFastElements(a2, t1, &double_elements);
// FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
// Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
__ JumpIfSmi(a0, &smi_element);
__ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
__ CheckFastSmiElements(a2, t1, &fast_elements);
// Store into the array literal requires an elements transition. Call into
// the runtime.
@ -7643,7 +7651,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Push(t1, t0);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
// Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
// Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
__ bind(&fast_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@ -7656,8 +7664,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
// Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
// and value is Smi.
__ bind(&smi_element);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
@ -7666,7 +7674,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,

4
deps/v8/src/mips/codegen-mips.cc

@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
}
void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value

106
deps/v8/src/mips/full-codegen-mips.cc

@ -1711,7 +1711,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
bool has_fast_elements =
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@ -1733,8 +1734,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode = has_fast_elements
? FastCloneShallowArrayStub::CLONE_ELEMENTS
@ -1763,7 +1763,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
if (constant_elements_kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ lw(t2, MemOperand(sp)); // Copy of array literal.
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
@ -3500,104 +3500,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
}
void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 3);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
Label done;
Label slow_case;
Register object = a0;
Register index1 = a1;
Register index2 = a2;
Register elements = a3;
Register scratch1 = t0;
Register scratch2 = t1;
__ lw(object, MemOperand(sp, 2 * kPointerSize));
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ GetObjectType(object, scratch1, scratch2);
__ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
// Map is now in scratch1.
__ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
__ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
__ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
// Check the object's elements are in fast case and writable.
__ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
__ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
__ Branch(&slow_case, ne, scratch1, Operand(scratch2));
// Check that both indices are smis.
__ lw(index1, MemOperand(sp, 1 * kPointerSize));
__ lw(index2, MemOperand(sp, 0));
__ JumpIfNotBothSmi(index1, index2, &slow_case);
// Check that both indices are valid.
Label not_hi;
__ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
__ Branch(&slow_case, ls, scratch1, Operand(index1));
__ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
__ Branch(&slow_case, ls, scratch1, Operand(index2));
__ bind(&not_hi);
// Bring the address of the elements into index1 and index2.
__ Addu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
__ Addu(index1, scratch1, index1);
__ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
__ Addu(index2, scratch1, index2);
// Swap elements.
__ lw(scratch1, MemOperand(index1, 0));
__ lw(scratch2, MemOperand(index2, 0));
__ sw(scratch1, MemOperand(index2, 0));
__ sw(scratch2, MemOperand(index1, 0));
Label no_remembered_set;
__ CheckPageFlag(elements,
scratch1,
1 << MemoryChunk::SCAN_ON_SCAVENGE,
ne,
&no_remembered_set);
// Possible optimization: do a check that both values are Smis
// (or them and test against Smi mask).
// We are swapping two objects in an array and the incremental marker never
// pauses in the middle of scanning a single object. Therefore the
// incremental marker is not disturbed, so we don't need to call the
// RecordWrite stub that notifies the incremental marker.
__ RememberedSetHelper(elements,
index1,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ RememberedSetHelper(elements,
index2,
scratch2,
kDontSaveFPRegs,
MacroAssembler::kFallThroughAtEnd);
__ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
__ Drop(3);
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ jmp(&done);
__ bind(&slow_case);
__ CallRuntime(Runtime::kSwapElements, 3);
__ bind(&done);
context()->Plug(v0);
}
void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(2, args->length());

23
deps/v8/src/mips/ic-mips.cc

@ -1347,34 +1347,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&non_double_value, ne, t0, Operand(at));
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
// Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
// and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
t0,
&slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
// Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
t0,
&slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
// HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
// transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
// Elements are double, but value is an Object that's not a HeapNumber. Make
// sure that the receiver is an Array with Object elements and transition the
// array from double elements to Object elements.
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
@ -1471,7 +1472,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);

114
deps/v8/src/mips/lithium-codegen-mips.cc

@ -2343,6 +2343,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
Register object = ToRegister(instr->object());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
int map_count = instr->hydrogen()->types()->length();
bool need_generic = instr->hydrogen()->need_generic();
@ -2357,8 +2358,8 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
bool last = (i == map_count - 1);
Handle<Map> map = instr->hydrogen()->types()->at(i);
if (last && !need_generic) {
Handle<Map> map = instr->hydrogen()->types()->last();
DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
EmitLoadFieldOrConstantFunction(result, object, map, name);
} else {
Label next;
__ Branch(&next, ne, scratch, Operand(map));
@ -2447,8 +2448,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
__ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ Ext(scratch, scratch, Map::kElementsKindShift,
Map::kElementsKindBitCount);
__ Branch(&done, eq, scratch,
Operand(FAST_ELEMENTS));
__ Branch(&fail, lt, scratch,
Operand(GetInitialFastElementsKind()));
__ Branch(&done, le, scratch,
Operand(TERMINAL_FAST_ELEMENTS_KIND));
__ Branch(&fail, lt, scratch,
Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
__ Branch(&done, le, scratch,
@ -2501,7 +2504,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
// Load the result.
__ sll(scratch, key, kPointerSizeLog2); // Key indexes words.
__ addu(scratch, elements, scratch);
__ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
uint32_t offset = FixedArray::kHeaderSize +
(instr->additional_index() << kPointerSizeLog2);
__ lw(result, FieldMemOperand(scratch, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
@ -2532,17 +2537,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
}
if (key_is_constant) {
__ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ Addu(elements, elements,
Operand(((constant_key + instr->additional_index()) << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, Operand(scratch));
__ Addu(elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
(instr->additional_index() << shift_size)));
}
__ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
__ ldc1(result, MemOperand(elements));
}
@ -2564,32 +2573,41 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister result = ToDoubleRegister(instr->result());
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
__ Addu(scratch0(), external_pointer, constant_key << shift_size);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ lwc1(result, MemOperand(scratch0()));
__ lwc1(result, MemOperand(scratch0(), additional_offset));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ ldc1(result, MemOperand(scratch0()));
__ ldc1(result, MemOperand(scratch0(), additional_offset));
}
} else {
Register result = ToRegister(instr->result());
Register scratch = scratch0();
if (instr->additional_index() != 0 && !key_is_constant) {
__ Addu(scratch, key, instr->additional_index());
}
MemOperand mem_operand(zero_reg);
if (key_is_constant) {
mem_operand = MemOperand(external_pointer,
constant_key * (1 << shift_size));
mem_operand =
MemOperand(external_pointer,
(constant_key << shift_size) + additional_offset);
} else {
__ sll(scratch, key, shift_size);
if (instr->additional_index() == 0) {
__ sll(scratch, key, shift_size);
} else {
__ sll(scratch, scratch, shift_size);
}
__ Addu(scratch, scratch, external_pointer);
mem_operand = MemOperand(scratch);
}
@ -2622,7 +2640,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3504,11 +3525,17 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
int offset =
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
(ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+ FixedArray::kHeaderSize;
__ sw(value, FieldMemOperand(elements, offset));
} else {
__ sll(scratch, key, kPointerSizeLog2);
__ addu(scratch, elements, scratch);
if (instr->additional_index() != 0) {
__ Addu(scratch,
scratch,
instr->additional_index() << kPointerSizeLog2);
}
__ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
}
@ -3551,7 +3578,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
if (key_is_constant) {
__ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
__ Addu(scratch, elements, Operand((constant_key << shift_size) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag));
} else {
__ sll(scratch, key, shift_size);
@ -3572,7 +3599,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
}
__ bind(&not_nan);
__ sdc1(value, MemOperand(scratch));
__ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size));
}
@ -3593,12 +3620,13 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
key = ToRegister(instr->key());
}
int shift_size = ElementsKindToShiftSize(elements_kind);
int additional_offset = instr->additional_index() << shift_size;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
FPURegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
__ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
__ Addu(scratch0(), external_pointer, constant_key << shift_size);
} else {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
@ -3606,19 +3634,27 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
__ swc1(double_scratch0(), MemOperand(scratch0()));
__ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ sdc1(value, MemOperand(scratch0()));
__ sdc1(value, MemOperand(scratch0(), additional_offset));
}
} else {
Register value(ToRegister(instr->value()));
MemOperand mem_operand(zero_reg);
Register scratch = scratch0();
if (instr->additional_index() != 0 && !key_is_constant) {
__ Addu(scratch, key, instr->additional_index());
}
MemOperand mem_operand(zero_reg);
if (key_is_constant) {
mem_operand = MemOperand(external_pointer,
constant_key * (1 << shift_size));
((constant_key + instr->additional_index())
<< shift_size));
} else {
__ sll(scratch, key, shift_size);
if (instr->additional_index() == 0) {
__ sll(scratch, key, shift_size);
} else {
__ sll(scratch, scratch, shift_size);
}
__ Addu(scratch, scratch, external_pointer);
mem_operand = MemOperand(scratch);
}
@ -3640,7 +3676,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3678,20 +3717,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
__ li(new_map_reg, Operand(to_map));
if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
} else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
} else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp_reg());
ASSERT(fixed_object_reg.is(a2));
ASSERT(new_map_reg.is(a3));
@ -4446,8 +4486,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
@ -4600,10 +4641,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the literal boilerplate ElementsKind is of a type different than
// the expected one. The check isn't necessary if the boilerplate has already
// been converted to FAST_ELEMENTS.
if (boilerplate_elements_kind != FAST_ELEMENTS) {
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
// Load map into a2.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));

5
deps/v8/src/mips/lithium-mips.cc

@ -2023,8 +2023,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
ElementsKind from_kind = instr->original_map()->elements_kind();
ElementsKind to_kind = instr->transitioned_map()->elements_kind();
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =

10
deps/v8/src/mips/lithium-mips.h

@ -1201,6 +1201,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1217,13 +1218,14 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
@ -1237,6 +1239,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1705,6 +1708,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1727,6 +1731,7 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -1771,6 +1776,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
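(The additional_index() accessors added here carry a constant element offset
into the keyed address computation; the math in lithium-codegen-mips.cc
above reduces to roughly the following, names illustrative:)
  int EffectiveByteOffset(int constant_key, int additional_index,
                          int shift_size) {
    // The constant part of the key and the extra index are folded together
    // before shifting, exactly as in DoLoadKeyedFastDoubleElement.
    return ((constant_key + additional_index) << shift_size) +
           FixedDoubleArray::kHeaderSize - kHeapObjectTag;
  }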

87
deps/v8/src/mips/macro-assembler-mips.cc

@ -3341,33 +3341,39 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, ls, scratch,
Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastElementValue));
Operand(Map::kMaximumBitField2FastHoleyElementValue));
}
void MacroAssembler::CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
void MacroAssembler::CheckFastSmiElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch,
Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}
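(The STATIC_ASSERTs pin the four fast kinds to 0..3, so each predicate above
is a single unsigned compare on bit_field2; in C++ terms, as a sketch:)
  uint8_t b2 = map->bit_field2();
  bool fast_any    = b2 <= Map::kMaximumBitField2FastHoleyElementValue;
  bool fast_smi    = b2 <= Map::kMaximumBitField2FastHoleySmiElementValue;
  bool fast_object = fast_any && !fast_smi;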
@ -3469,22 +3475,17 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
Operand right = Operand(map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
Branch(early_success, eq, scratch, right);
right = Operand(Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
Branch(early_success, eq, scratch, right);
right = Operand(Handle<Map>(transitioned_double_map));
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind, NULL);
if (!current_map) break;
Branch(early_success, eq, scratch, right);
right = Operand(Handle<Map>(current_map));
}
}
}
@ -4443,27 +4444,37 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
Branch(no_map_match, ne, map_in_out, Operand(at));
lw(scratch,
MemOperand(scratch,
Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
size_t offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
Branch(no_map_match, ne, map_in_out, Operand(scratch));
// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
offset = transitioned_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
lw(map_in_out, FieldMemOperand(scratch, offset));
}
void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
Register function_in, Register scratch,
Register map_out, bool can_have_holes) {
ASSERT(!function_in.is(map_out));
Label done;
lw(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
kind,
map_out,
scratch,
&done);
} else if (can_have_holes) {
LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_HOLEY_SMI_ELEMENTS,
map_out,
scratch,
&done);
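(Callers now declare up front whether the array they are building can
contain holes and receive the matching initial map; a call-site sketch with
stand-in registers:)
  // Holey path: yields a FAST_HOLEY_* map when the transition exists.
  __ LoadInitialArrayMap(array_function, scratch, map_out, true);
  // Packed path: the original behavior, a packed FAST_* map.
  __ LoadInitialArrayMap(array_function, scratch, map_out, false);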

9
deps/v8/src/mips/macro-assembler-mips.h

@ -819,7 +819,8 @@ class MacroAssembler: public Assembler {
// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);
Register map_out,
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
@ -961,9 +962,9 @@ class MacroAssembler: public Assembler {
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiOnlyElements(Register map,
Register scratch,
Label* fail);
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in

160
deps/v8/src/mips/regexp-macro-assembler-mips.cc

@ -43,44 +43,49 @@ namespace internal {
#ifndef V8_INTERPRETED_REGEXP
/*
* This assembler uses the following register assignment convention
* - t7 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
* - t1 : Pointer to current code object (Code*) including heap object tag.
* - t2 : Current position in input, as negative offset from end of string.
* Please notice that this is the byte offset, not the character offset!
* - t3 : Currently loaded character. Must be loaded using
* LoadCurrentCharacter before using any of the dispatch methods.
* - t4 : points to tip of backtrack stack
* - t4 : Points to tip of backtrack stack
* - t5 : Unused.
* - t6 : End of input (points to byte after last character in input).
* - fp : Frame pointer. Used to access arguments, local variables and
* RegExp registers.
* - sp : points to tip of C stack.
* - sp : Points to tip of C stack.
*
* The remaining registers are free for computations.
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
*
* - fp[56] direct_call (if 1, direct call from JavaScript code,
* - fp[64] Isolate* isolate (address of the current isolate)
* - fp[60] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[52] stack_area_base (High end of the memory area to use as
* - fp[56] stack_area_base (High end of the memory area to use as
* backtracking stack).
* - fp[52] capture array size (may fit multiple sets of matches)
* - fp[48] int* capture_array (int[num_saved_registers_], for output).
* - fp[44] secondary link/return address used by native call.
* --- sp when called ---
* - fp[40] return address (lr).
* - fp[36] old frame pointer (r11).
* - fp[40] return address (lr).
* - fp[36] old frame pointer (r11).
* - fp[0..32] backup of registers s0..s7.
* --- frame pointer ----
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-4] end of input (address of end of string).
* - fp[-8] start of input (address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] Offset of location before start of input (effectively character
* - fp[-20] success counter (only for global regexps to count matches).
* - fp[-24] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-24] At start (if 1, we are starting at the start of the
* - fp[-28] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-28] register 0 (Only positions must be stored in the first
* - fp[-32] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
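
For reference, a compact restatement of the frame layout documented above, as a standalone C++ enum (slot names are mine; values are byte offsets from fp, assuming 4-byte slots):

enum FrameSlot {
  kIsolateSlot        = 64,   // Isolate* of the current isolate
  kDirectCallSlot     = 60,   // 1 = direct call from JS, 0 = via runtime
  kStackAreaBaseSlot  = 56,   // high end of the backtracking stack
  kCaptureArraySize   = 52,   // may fit multiple sets of matches
  kCaptureArraySlot   = 48,   // int* capture_array for output
  kSecondaryLinkSlot  = 44,   // return address used by native call
  kReturnAddressSlot  = 40,   // saved ra
  kOldFramePointer    = 36,   // fp[0..32] below hold s0..s7
  kInputEndSlot       = -4,
  kInputStartSlot     = -8,
  kStartIndexSlot     = -12,
  kInputStringSlot    = -16,
  kSuccessCounter     = -20,  // only for global regexps
  kInputStartMinus1   = -24,  // "position -1", initializes capture registers
  kAtStartSlot        = -28,
  kRegisterZeroSlot   = -32   // further registers below on the stack
};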
@ -201,8 +206,8 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg));
__ lw(a0, MemOperand(frame_pointer(), kStartIndex));
BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
@ -214,8 +219,8 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg));
__ lw(a0, MemOperand(frame_pointer(), kStartIndex));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ lw(a1, MemOperand(frame_pointer(), kInputStart));
__ Addu(a0, end_of_input_address(), Operand(current_input_offset()));
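
The stored kAtStart flag is gone; both checks now recompute the predicate from kStartIndex and kInputStart. A minimal model of the new test (illustrative types, not V8's):

bool AtStart(int start_index, const char* input_start,
             const char* input_end, int current_offset) {
  if (start_index != 0) return false;  // match did not begin at index 0
  // current_offset is a negative byte offset from the end of the input.
  return input_end + current_offset == input_start;
}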
@ -640,6 +645,7 @@ void RegExpMacroAssemblerMIPS::Fail() {
Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
Label return_v0;
if (masm_->has_exception()) {
// If the code gets corrupted due to long regular expressions and lack of
// space on trampolines, an internal exception flag is set. If this case
@ -669,8 +675,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ Addu(frame_pointer(), sp, Operand(4 * kPointerSize));
__ mov(a0, zero_reg);
__ push(a0); // Make room for success counter and initialize it to 0.
__ push(a0); // Make room for "position - 1" constant (value irrelevant).
__ push(a0); // Make room for "at start" constant (value irrelevant).
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@ -689,12 +696,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ li(v0, Operand(EXCEPTION));
__ jmp(&exit_label_);
__ jmp(&return_v0);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(a0);
// If returned value is non-zero, we exit with the returned value as result.
__ Branch(&exit_label_, ne, v0, Operand(zero_reg));
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
__ bind(&stack_ok);
// Allocate space on stack for registers.
@ -715,39 +722,44 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// position registers.
__ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ mov(t5, a1);
__ li(a1, Operand(1));
__ Movn(a1, zero_reg, t5);
__ sw(a1, MemOperand(frame_pointer(), kAtStart));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
__ li(current_character(), Operand('\n'));
__ jmp(&start_regexp);
// Global regexp restarts matching here.
__ bind(&load_char_start_regexp);
// Load previous char as initial value of current character register.
LoadCurrentCharacterUnchecked(-1, 1);
__ bind(&start_regexp);
// Initialize on-stack registers.
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1.
// Address of register 0.
__ Addu(a1, frame_pointer(), Operand(kRegisterZero));
__ li(a2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ sw(a0, MemOperand(a1));
__ Addu(a1, a1, Operand(-kPointerSize));
__ Subu(a2, a2, Operand(1));
__ Branch(&init_loop, ne, a2, Operand(zero_reg));
if (num_saved_registers_ > 8) {
// Address of register 0.
__ Addu(a1, frame_pointer(), Operand(kRegisterZero));
__ li(a2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ sw(a0, MemOperand(a1));
__ Addu(a1, a1, Operand(-kPointerSize));
__ Subu(a2, a2, Operand(1));
__ Branch(&init_loop, ne, a2, Operand(zero_reg));
} else {
for (int i = 0; i < num_saved_registers_; i++) {
__ sw(a0, register_location(i));
}
}
}
// Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
// Load previous char as initial value of current character register.
Label at_start;
__ lw(a0, MemOperand(frame_pointer(), kAtStart));
__ Branch(&at_start, ne, a0, Operand(zero_reg));
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
__ li(current_character(), Operand('\n'));
__ jmp(&start_label_);
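
The preamble now materializes '\n' as the "previous character" when the match begins at the very start of the input, and fills the capture registers with the start offset minus one, using a loop only when more than eight registers are needed (unrolled stores otherwise). A sketch of the current-character rule:

char InitialCurrentCharacter(bool at_start, const char* pos) {
  // With no previous character available, '\n' makes ^, \b and \B behave
  // as if the match were preceded by a newline; otherwise load pos[-1].
  return at_start ? '\n' : pos[-1];
}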
@ -776,6 +788,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
for (int i = 0; i < num_saved_registers_; i += 2) {
__ lw(a2, register_location(i));
__ lw(a3, register_location(i + 1));
if (global()) {
// Keep capture start in a4 for the zero-length check later.
__ mov(t7, a2);
}
if (mode_ == UC16) {
__ sra(a2, a2, 1);
__ Addu(a2, a2, a1);
@ -791,10 +807,52 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Addu(a0, a0, kPointerSize);
}
}
__ li(v0, Operand(SUCCESS));
if (global()) {
// Restart matching if the regular expression is flagged as global.
__ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
__ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
__ lw(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ Addu(a0, a0, 1);
__ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ Subu(a1, a1, num_saved_registers_);
// Check whether we have enough room for another set of capture results.
__ mov(v0, a0);
__ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
__ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ Addu(a2, a2, num_saved_registers_ * kPointerSize);
__ sw(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
__ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Special case for zero-length matches.
// t7: capture start index
// Not a zero-length match, restart.
__ Branch(
&load_char_start_regexp, ne, current_input_offset(), Operand(t7));
// Offset from the end is zero if we already reached the end.
__ Branch(&exit_label_, eq, current_input_offset(), Operand(zero_reg));
// Advance current position after a zero-length match.
__ Addu(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
__ Branch(&load_char_start_regexp);
} else {
__ li(v0, Operand(SUCCESS));
}
}
// Exit and return v0.
__ bind(&exit_label_);
if (global()) {
__ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_v0);
// Skip sp past regexp registers and local variables.

__ mov(sp, frame_pointer());
// Restore registers s0..s7 and return (restoring ra to pc).
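
The heart of the change: global regexps no longer return after one match. The generated code loops, counting successes and packing capture sets into the output buffer until it runs out of room or input. A rough C++ model of that driver, with MatchOnce as a hypothetical stand-in for one pass of the generated matcher:

bool MatchOnce(int* capture_out);  // hypothetical: one pass of the matcher

int RunGlobal(int* output, int output_len, int num_saved_registers) {
  int successes = 0;
  int remaining = output_len;
  while (MatchOnce(output + (output_len - remaining))) {
    ++successes;
    remaining -= num_saved_registers;
    // Stop when there is no room for another full set of captures.
    if (remaining < num_saved_registers) break;
    // Zero-length match: if no input was consumed, the generated code
    // advances one character (two bytes in UC16 mode) before restarting,
    // or exits if it is already at the end of the input.
  }
  return successes;
}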
@ -820,7 +878,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ MultiPop(regexp_registers_to_retain);
// If returning non-zero, we should end execution with the given
// result as return value.
__ Branch(&exit_label_, ne, v0, Operand(zero_reg));
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
// String might have moved: Reload end of string from frame.
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
@ -864,7 +922,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
__ li(v0, Operand(EXCEPTION));
__ jmp(&exit_label_);
__ jmp(&return_v0);
}
}
@ -1012,8 +1070,9 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
}
void RegExpMacroAssemblerMIPS::Succeed() {
bool RegExpMacroAssemblerMIPS::Succeed() {
__ jmp(&success_label_);
return global();
}
@ -1280,8 +1339,9 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
int characters) {
Register offset = current_input_offset();
if (cp_offset != 0) {
__ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
offset = a0;
// t7 is not being used to store the capture start index at this point.
__ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
offset = t7;
}
// We assume that we cannot do unaligned loads on MIPS, so this function
// must only be used to load a single character at a time.

11
deps/v8/src/mips/regexp-macro-assembler-mips.h

@ -115,7 +115,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void ReadStackPointerFromRegister(int reg);
virtual void SetCurrentPositionFromEnd(int by);
virtual void SetRegister(int register_index, int to);
virtual void Succeed();
virtual bool Succeed();
virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
@ -141,7 +141,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kStackFrameHeader = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
@ -153,10 +154,10 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kInputString = kStartIndex - kPointerSize;
// When adding local variables remember to push space for them in
// the frame in GetCode.
static const int kInputStartMinusOne = kInputString - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
static const int kSuccessfulCaptures = kInputString - kPointerSize;
static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kAtStart - kPointerSize;
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
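
A quick sanity check of the new local-slot offsets (assuming kPointerSize == 4 and kInputString at fp[-16], per the frame comment in GetCode). The removed kAtStart slot is replaced by the success counter, so kRegisterZero keeps the same depth:

const int kPointerSize = 4;  // MIPS32 assumption
const int kInputString = -16;
const int kSuccessfulCaptures = kInputString - kPointerSize;         // -20
const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;  // -24
const int kRegisterZero = kInputStartMinusOne - kPointerSize;        // -28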

10
deps/v8/src/mips/simulator-mips.h

@ -50,16 +50,16 @@ namespace internal {
entry(p0, p1, p2, p3, p4)
typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int, Isolate*);
void*, int*, int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7))
p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@ -403,9 +403,9 @@ class Simulator {
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
Simulator::current(Isolate::Current())->Call( \
entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
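
The matcher signature gains one int: the number of remaining output registers. Restated below with placeholder types and parameter names inferred from the frame layout above (the names are mine, not V8's):

// Placeholder types so the sketch stands alone; in V8 these are real classes.
class String;
class Isolate;
typedef unsigned char byte;
typedef void* Address;

typedef int (*mips_regexp_matcher)(String* input,
                                   int start_index,
                                   const byte* input_start,
                                   const byte* input_end,
                                   void* return_address_space,  // dummy slot
                                   int* output,
                                   int output_size,             // the new int
                                   Address stack_base,
                                   int direct_call,
                                   Isolate* isolate);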

51
deps/v8/src/mips/stub-cache-mips.cc

@ -1585,16 +1585,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiOnlyElements(a3, t3, &call_builtin);
__ CheckFastSmiElements(a3, t3, &call_builtin);
// edx: receiver
// r3: map
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
a3,
t3,
&try_holey_map);
__ mov(a2, receiver);
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ jmp(&fast_object);
__ bind(&try_holey_map);
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
FAST_HOLEY_ELEMENTS,
a3,
t3,
&call_builtin);
__ mov(a2, receiver);
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
ElementsTransitionGenerator::
GenerateMapChangeElementsTransition(masm());
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
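
The push stub now tries the packed smi-to-object transition first and falls back to the holey pair before bailing out to the builtin. A toy decision function capturing that order (enum and names are mine):

enum PushKind {
  FAST_SMI, FAST_HOLEY_SMI, FAST_OBJECT, FAST_HOLEY_OBJECT, CALL_BUILTIN
};

PushKind PushTransitionTarget(PushKind current) {
  switch (current) {
    case FAST_SMI:       return FAST_OBJECT;        // packed pair first
    case FAST_HOLEY_SMI: return FAST_HOLEY_OBJECT;  // then the holey pair
    default:             return CALL_BUILTIN;       // bail out to builtin
  }
}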
@ -3372,9 +3385,12 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3508,8 +3524,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3869,8 +3888,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -3934,8 +3956,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -4106,8 +4131,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
@ -4286,7 +4314,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
}
@ -4314,7 +4342,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ bind(&finish_store);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (IsFastSmiElementsKind(elements_kind)) {
__ Addu(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -4323,7 +4351,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Addu(scratch, scratch, scratch2);
__ sw(value_reg, MemOperand(scratch));
} else {
ASSERT(elements_kind == FAST_ELEMENTS);
ASSERT(IsFastObjectElementsKind(elements_kind));
__ Addu(scratch,
elements_reg,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@ -4332,7 +4360,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
__ Addu(scratch, scratch, scratch2);
__ sw(value_reg, MemOperand(scratch));
__ mov(receiver_reg, value_reg);
ASSERT(elements_kind == FAST_ELEMENTS);
__ RecordWrite(elements_reg, // Object.
scratch, // Address.
receiver_reg, // Value.

52
deps/v8/src/objects-debug.cc

@ -286,12 +286,11 @@ void JSObject::JSObjectVerify() {
(map()->inobject_properties() + properties()->length() -
map()->NextFreePropertyIndex()));
}
ASSERT_EQ((map()->has_fast_elements() ||
map()->has_fast_smi_only_elements() ||
ASSERT_EQ((map()->has_fast_smi_or_object_elements() ||
(elements() == GetHeap()->empty_fixed_array())),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
ASSERT(map()->has_fast_elements() == HasFastElements());
ASSERT(map()->has_fast_object_elements() == HasFastObjectElements());
}
@ -458,10 +457,17 @@ void String::StringVerify() {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
} else if (IsSeqAsciiString()) {
SeqAsciiString::cast(this)->SeqAsciiStringVerify();
}
}
void SeqAsciiString::SeqAsciiStringVerify() {
CHECK(String::IsAscii(GetChars(), length()));
}
void ConsString::ConsStringVerify() {
CHECK(this->first()->IsString());
CHECK(this->second() == GetHeap()->empty_string() ||
@ -510,7 +516,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
CHECK(HasFastElements());
CHECK(HasFastObjectElements());
CHECK_EQ(0, FixedArray::cast(elements())->length());
}
@ -805,6 +811,11 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
// Indexed properties
switch (GetElementsKind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
int holes = 0;
@ -818,6 +829,14 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_unused_elements_ += holes;
break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
ExternalPixelArray* e = ExternalPixelArray::cast(elements());
@ -831,8 +850,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
dict->Capacity() - dict->NumberOfElements();
break;
}
default:
UNREACHABLE();
case NON_STRICT_ARGUMENTS_ELEMENTS:
break;
}
}
@ -992,6 +1010,28 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
}
void Map::ZapInstanceDescriptors() {
DescriptorArray* descriptors = instance_descriptors();
if (descriptors == GetHeap()->empty_descriptor_array()) return;
FixedArray* contents = FixedArray::cast(
descriptors->get(DescriptorArray::kContentArrayIndex));
MemsetPointer(descriptors->data_start(),
GetHeap()->the_hole_value(),
descriptors->length());
MemsetPointer(contents->data_start(),
GetHeap()->the_hole_value(),
contents->length());
}
void Map::ZapPrototypeTransitions() {
FixedArray* proto_transitions = prototype_transitions();
MemsetPointer(proto_transitions->data_start(),
GetHeap()->the_hole_value(),
proto_transitions->length());
}
#endif // DEBUG
} } // namespace v8::internal

247
deps/v8/src/objects-inl.h

@ -128,18 +128,6 @@ PropertyDetails PropertyDetails::AsDeleted() {
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
if (to_kind == FAST_ELEMENTS) {
return from_kind == FAST_SMI_ONLY_ELEMENTS ||
from_kind == FAST_DOUBLE_ELEMENTS;
} else {
return to_kind == FAST_DOUBLE_ELEMENTS &&
from_kind == FAST_SMI_ONLY_ELEMENTS;
}
}
bool Object::IsFixedArrayBase() {
return IsFixedArray() || IsFixedDoubleArray();
}
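
The hard-coded three-kind transition test above is deleted because transitions are now driven by a general fast-kind sequence (see elements-kind.cc/.h in this commit). A hedged sketch of the ordering as this change appears to use it; the authoritative table lives in elements-kind.cc:

enum FastKind {
  SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT,
  kFastKindCount
};

// Roughly, "more general" now just means "later in the sequence".
bool IsMoreGeneral(FastKind from, FastKind to) { return to > from; }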
@ -1244,35 +1232,26 @@ FixedArrayBase* JSObject::elements() {
return static_cast<FixedArrayBase*>(array);
}
void JSObject::ValidateSmiOnlyElements() {
void JSObject::ValidateElements() {
#if DEBUG
if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
Heap* heap = GetHeap();
// Don't use elements, since integrity checks will fail if there
// are filler pointers in the array.
FixedArray* fixed_array =
reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
Map* map = fixed_array->map();
// Arrays that have been shifted in place can't be verified.
if (map != heap->raw_unchecked_one_pointer_filler_map() &&
map != heap->raw_unchecked_two_pointer_filler_map() &&
map != heap->free_space_map()) {
for (int i = 0; i < fixed_array->length(); i++) {
Object* current = fixed_array->get(i);
ASSERT(current->IsSmi() || current->IsTheHole());
}
}
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = GetElementsAccessor();
accessor->Validate(this);
}
#endif
}
MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
#if DEBUG
ValidateSmiOnlyElements();
#endif
if ((map()->elements_kind() != FAST_ELEMENTS)) {
return TransitionElementsKind(FAST_ELEMENTS);
ValidateElements();
ElementsKind elements_kind = map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
return TransitionElementsKind(FAST_HOLEY_ELEMENTS);
} else {
return TransitionElementsKind(FAST_ELEMENTS);
}
}
return this;
}
@ -1284,20 +1263,34 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
ElementsKind current_kind = map()->elements_kind();
ElementsKind target_kind = current_kind;
ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
if (current_kind == FAST_ELEMENTS) return this;
bool is_holey = IsFastHoleyElementsKind(current_kind);
if (current_kind == FAST_HOLEY_ELEMENTS) return this;
Heap* heap = GetHeap();
Object* the_hole = heap->the_hole_value();
Object* heap_number_map = heap->heap_number_map();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (!current->IsSmi() && current != the_hole) {
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
} else if (!current->IsSmi()) {
if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
HeapObject::cast(current)->map() == heap_number_map) {
target_kind = FAST_DOUBLE_ELEMENTS;
HeapObject::cast(current)->map() == heap_number_map &&
IsFastSmiElementsKind(target_kind)) {
if (is_holey) {
target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
} else {
target_kind = FAST_DOUBLE_ELEMENTS;
}
} else {
target_kind = FAST_ELEMENTS;
break;
if (!current->IsNumber()) {
if (is_holey) {
target_kind = FAST_HOLEY_ELEMENTS;
break;
} else {
target_kind = FAST_ELEMENTS;
}
}
}
}
}
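
The scan now tracks holey-ness and widens the target kind value by value: holes make the kind holey, a heap number widens smi to double, and any other non-smi widens to object elements. A compact model of one step of that widening (types and names are mine):

enum TargetKind { SMI_K, DOUBLE_K, OBJECT_K };
struct Scan { TargetKind kind; bool holey; };

// One step of the scan; *done signals that no further widening is possible.
Scan Widen(Scan s, bool is_hole, bool is_smi, bool is_heap_number,
           bool* done) {
  *done = false;
  if (is_hole) { s.holey = true; return s; }   // holes force a holey kind
  if (is_smi) return s;                        // smis fit every fast kind
  if (is_heap_number && s.kind == SMI_K) {     // heap number: smi -> double
    s.kind = DOUBLE_K;
    return s;
  }
  if (!is_heap_number) {                       // anything else: -> object
    s.kind = OBJECT_K;
    *done = s.holey;  // holey object elements cannot widen any further
  }
  return s;
}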
@ -1310,6 +1303,7 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
uint32_t length,
EnsureElementsMode mode) {
if (elements->map() != GetHeap()->fixed_double_array_map()) {
ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
@ -1318,11 +1312,19 @@ MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
return EnsureCanContainElements(objects, elements->length(), mode);
return EnsureCanContainElements(objects, length, mode);
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
} else if (GetElementsKind() == FAST_SMI_ELEMENTS) {
FixedDoubleArray* double_array = FixedDoubleArray::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS);
}
}
return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
}
@ -1334,21 +1336,20 @@ MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
ElementsKind to_kind) {
Map* current_map = map();
ElementsKind from_kind = current_map->elements_kind();
if (from_kind == to_kind) return current_map;
Context* global_context = isolate->context()->global_context();
if (current_map == global_context->smi_js_array_map()) {
if (to_kind == FAST_ELEMENTS) {
return global_context->object_js_array_map();
} else {
if (to_kind == FAST_DOUBLE_ELEMENTS) {
return global_context->double_js_array_map();
} else {
ASSERT(to_kind == DICTIONARY_ELEMENTS);
Object* maybe_array_maps = global_context->js_array_maps();
if (maybe_array_maps->IsFixedArray()) {
FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
if (array_maps->get(from_kind) == current_map) {
Object* maybe_transitioned_map = array_maps->get(to_kind);
if (maybe_transitioned_map->IsMap()) {
return Map::cast(maybe_transitioned_map);
}
}
}
return GetElementsTransitionMapSlow(to_kind);
}
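
Transition maps for JSArrays are now cached per global context in a FixedArray indexed by ElementsKind. A minimal model of the fast-path lookup (Map is an opaque stand-in here):

struct Map;  // opaque stand-in for v8::internal::Map

Map* CachedTransition(Map* const* array_maps,  // context's per-kind cache
                      int from_kind, int to_kind, Map* current_map) {
  if (array_maps == 0) return 0;                    // cache not built yet
  if (array_maps[from_kind] != current_map) return 0;
  return array_maps[to_kind];  // 0 here means: take the slow path
}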
@ -1357,9 +1358,6 @@ void JSObject::set_map_and_elements(Map* new_map,
FixedArrayBase* value,
WriteBarrierMode mode) {
ASSERT(value->HasValidElements());
#ifdef DEBUG
ValidateSmiOnlyElements();
#endif
if (new_map != NULL) {
if (mode == UPDATE_WRITE_BARRIER) {
set_map(new_map);
@ -1368,8 +1366,7 @@ void JSObject::set_map_and_elements(Map* new_map,
set_map_no_write_barrier(new_map);
}
}
ASSERT((map()->has_fast_elements() ||
map()->has_fast_smi_only_elements() ||
ASSERT((map()->has_fast_smi_or_object_elements() ||
(value == GetHeap()->empty_fixed_array())) ==
(value->map() == GetHeap()->fixed_array_map() ||
value->map() == GetHeap()->fixed_cow_array_map()));
@ -1392,8 +1389,7 @@ void JSObject::initialize_properties() {
void JSObject::initialize_elements() {
ASSERT(map()->has_fast_elements() ||
map()->has_fast_smi_only_elements() ||
ASSERT(map()->has_fast_smi_or_object_elements() ||
map()->has_fast_double_elements());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
@ -1402,9 +1398,10 @@ void JSObject::initialize_elements() {
MaybeObject* JSObject::ResetElements() {
Object* obj;
ElementsKind elements_kind = FLAG_smi_only_arrays
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
ElementsKind elements_kind = GetInitialFastElementsKind();
if (!FLAG_smi_only_arrays) {
elements_kind = FastSmiToObjectElementsKind(elements_kind);
}
MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
elements_kind);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@ -1676,6 +1673,11 @@ Object* FixedArray::get(int index) {
}
bool FixedArray::is_the_hole(int index) {
return get(index) == GetHeap()->the_hole_value();
}
void FixedArray::set(int index, Smi* value) {
ASSERT(map() != HEAP->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
@ -2857,15 +2859,15 @@ bool Map::has_non_instance_prototype() {
void Map::set_function_with_prototype(bool value) {
if (value) {
set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype));
set_bit_field3(bit_field3() | (1 << kFunctionWithPrototype));
} else {
set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype));
set_bit_field3(bit_field3() & ~(1 << kFunctionWithPrototype));
}
}
bool Map::function_with_prototype() {
return ((1 << kFunctionWithPrototype) & bit_field2()) != 0;
return ((1 << kFunctionWithPrototype) & bit_field3()) != 0;
}
@ -3351,6 +3353,9 @@ void Map::clear_instance_descriptors() {
Object* object = READ_FIELD(this,
kInstanceDescriptorsOrBitField3Offset);
if (!object->IsSmi()) {
#ifdef DEBUG
ZapInstanceDescriptors();
#endif
WRITE_FIELD(
this,
kInstanceDescriptorsOrBitField3Offset,
@ -3376,6 +3381,11 @@ void Map::set_instance_descriptors(DescriptorArray* value,
}
}
ASSERT(!is_shared());
#ifdef DEBUG
if (value != instance_descriptors()) {
ZapInstanceDescriptors();
}
#endif
WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
CONDITIONAL_WRITE_BARRIER(
heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
@ -3448,6 +3458,11 @@ void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
Heap* heap = GetHeap();
ASSERT(value != heap->empty_fixed_array());
value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
#ifdef DEBUG
if (value != prototype_transitions()) {
ZapPrototypeTransitions();
}
#endif
WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
CONDITIONAL_WRITE_BARRIER(
heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
@ -3995,27 +4010,32 @@ MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
global_context->get(Context::ARRAY_FUNCTION_INDEX);
if (array_function->IsJSFunction() &&
this == JSFunction::cast(array_function)) {
ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
MaybeObject* maybe_map = initial_map->CopyDropTransitions();
Map* new_double_map = NULL;
if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS,
new_double_map);
if (maybe_map->IsFailure()) return maybe_map;
maybe_map = new_double_map->CopyDropTransitions();
Map* new_object_map = NULL;
if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
new_object_map->set_elements_kind(FAST_ELEMENTS);
maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS,
new_object_map);
if (maybe_map->IsFailure()) return maybe_map;
global_context->set_smi_js_array_map(initial_map);
global_context->set_double_js_array_map(new_double_map);
global_context->set_object_js_array_map(new_object_map);
// Replace all of the cached initial array maps in the global context with
// the appropriate transitioned elements kind maps.
Heap* heap = GetHeap();
MaybeObject* maybe_maps =
heap->AllocateFixedArrayWithHoles(kElementsKindCount);
FixedArray* maps;
if (!maybe_maps->To(&maps)) return maybe_maps;
Map* current_map = initial_map;
ElementsKind kind = current_map->elements_kind();
ASSERT(kind == GetInitialFastElementsKind());
maps->set(kind, current_map);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
ElementsKind transitioned_kind = GetFastElementsKindFromSequenceIndex(i);
MaybeObject* maybe_new_map = current_map->CopyDropTransitions();
Map* new_map = NULL;
if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
new_map->set_elements_kind(transitioned_kind);
maybe_new_map = current_map->AddElementsTransition(transitioned_kind,
new_map);
if (maybe_new_map->IsFailure()) return maybe_new_map;
maps->set(transitioned_kind, new_map);
current_map = new_map;
}
global_context->set_js_array_maps(maps);
}
set_initial_map(initial_map);
return this;
@ -4351,18 +4371,18 @@ ElementsKind JSObject::GetElementsKind() {
FixedArrayBase* fixed_array =
reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
Map* map = fixed_array->map();
ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map())) ||
(kind == FAST_DOUBLE_ELEMENTS &&
(fixed_array->IsFixedDoubleArray() ||
fixed_array == GetHeap()->empty_fixed_array())) ||
(kind == DICTIONARY_ELEMENTS &&
ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(kind) &&
(fixed_array->IsFixedDoubleArray() ||
fixed_array == GetHeap()->empty_fixed_array())) ||
(kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
#endif
return kind;
}
@ -4373,25 +4393,28 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
}
bool JSObject::HasFastElements() {
return GetElementsKind() == FAST_ELEMENTS;
bool JSObject::HasFastObjectElements() {
return IsFastObjectElementsKind(GetElementsKind());
}
bool JSObject::HasFastSmiOnlyElements() {
return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
bool JSObject::HasFastSmiElements() {
return IsFastSmiElementsKind(GetElementsKind());
}
bool JSObject::HasFastTypeElements() {
ElementsKind elements_kind = GetElementsKind();
return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_ELEMENTS;
bool JSObject::HasFastSmiOrObjectElements() {
return IsFastSmiOrObjectElementsKind(GetElementsKind());
}
bool JSObject::HasFastDoubleElements() {
return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
return IsFastDoubleElementsKind(GetElementsKind());
}
bool JSObject::HasFastHoleyElements() {
return IsFastHoleyElementsKind(GetElementsKind());
}
@ -4448,7 +4471,7 @@ bool JSObject::HasIndexedInterceptor() {
MaybeObject* JSObject::EnsureWritableFastElements() {
ASSERT(HasFastTypeElements());
ASSERT(HasFastSmiOrObjectElements());
FixedArray* elems = FixedArray::cast(elements());
Isolate* isolate = GetIsolate();
if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@ -4806,7 +4829,7 @@ void Map::ClearCodeCache(Heap* heap) {
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastTypeElements());
ASSERT(HasFastSmiOrObjectElements());
FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
@ -4838,13 +4861,13 @@ bool JSArray::AllowsSetElementsLength() {
MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
MaybeObject* maybe_result = EnsureCanContainElements(
storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS);
if (maybe_result->IsFailure()) return maybe_result;
ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
IsFastDoubleElementsKind(GetElementsKind())) ||
((storage->map() != GetHeap()->fixed_double_array_map()) &&
((GetElementsKind() == FAST_ELEMENTS) ||
(GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
(IsFastObjectElementsKind(GetElementsKind()) ||
(IsFastSmiElementsKind(GetElementsKind()) &&
FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
set_elements(storage);
set_length(Smi::FromInt(storage->length()));

5
deps/v8/src/objects-printer.cc

@ -318,7 +318,9 @@ void JSObject::PrintElements(FILE* out) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
switch (map()->elements_kind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
@ -329,6 +331,7 @@ void JSObject::PrintElements(FILE* out) {
}
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// Print in array notation for non-sparse arrays.
if (elements()->length() > 0) {

484
deps/v8/src/objects.cc

@ -56,11 +56,6 @@
namespace v8 {
namespace internal {
void PrintElementsKind(FILE* out, ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}
MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
Object* value) {
@ -543,7 +538,7 @@ bool JSObject::IsDirty() {
// If the object is fully fast case and has the same map it was
// created with then no changes can have been made to it.
return map() != fun->initial_map()
|| !HasFastElements()
|| !HasFastObjectElements()
|| !HasFastProperties();
}
@ -1067,7 +1062,9 @@ void String::StringShortPrint(StringStream* accumulator) {
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
switch (map()->instance_type()) {
case JS_ARRAY_TYPE: {
double length = JSArray::cast(this)->length()->Number();
double length = JSArray::cast(this)->length()->IsUndefined()
? 0
: JSArray::cast(this)->length()->Number();
accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
break;
}
@ -2202,34 +2199,29 @@ static Handle<T> MaybeNull(T* p) {
Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
ElementsKind elms_kind = elements_kind();
if (elms_kind == FAST_DOUBLE_ELEMENTS) {
bool dummy = true;
Handle<Map> fast_map =
MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
return fast_map;
}
return Handle<Map>::null();
}
if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
bool dummy = true;
Handle<Map> double_map =
MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
// In the current implementation, if the DOUBLE map doesn't exist, the
// FAST map can't exist either.
if (double_map.is_null()) return Handle<Map>::null();
Handle<Map> fast_map =
MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
&dummy));
if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
return fast_map;
}
if (ContainsMap(candidates, double_map)) return double_map;
}
return Handle<Map>::null();
ElementsKind kind = elements_kind();
Handle<Map> transitioned_map = Handle<Map>::null();
Handle<Map> current_map(this);
bool packed = IsFastPackedElementsKind(kind);
if (IsTransitionableFastElementsKind(kind)) {
while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) {
kind = GetNextMoreGeneralFastElementsKind(kind, false);
bool dummy = true;
Handle<Map> maybe_transitioned_map =
MaybeNull(current_map->LookupElementsTransitionMap(kind, &dummy));
if (maybe_transitioned_map.is_null()) break;
if (ContainsMap(candidates, maybe_transitioned_map) &&
(packed || !IsFastPackedElementsKind(kind))) {
transitioned_map = maybe_transitioned_map;
if (!IsFastPackedElementsKind(kind)) packed = false;
}
current_map = maybe_transitioned_map;
}
}
return transitioned_map;
}
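
The rewrite walks the whole fast-kind chain, and packed candidates stop qualifying once the walk takes a holey map. One step of that acceptance rule, modeled in plain C++ (names are mine):

// Returns whether the candidate map is taken; *packed tracks the walk state.
bool TakeCandidate(bool* packed, bool candidate_is_packed,
                   bool is_in_candidates) {
  if (!is_in_candidates) return false;
  if (!*packed && candidate_is_packed) return false;  // no way back to packed
  if (!candidate_is_packed) *packed = false;          // holey from here on
  return true;
}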
static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
ElementsKind elements_kind) {
if (descriptor_contents->IsMap()) {
@ -2338,24 +2330,36 @@ Object* Map::GetDescriptorContents(String* sentinel_name,
}
Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
Map* Map::LookupElementsTransitionMap(ElementsKind to_kind,
bool* safe_to_add_transition) {
// Special case: indirect SMI->FAST transition (cf. comment in
// AddElementsTransition()).
if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
elements_kind == FAST_ELEMENTS) {
Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
safe_to_add_transition);
if (double_map == NULL) return double_map;
return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
ElementsKind from_kind = elements_kind();
if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
if (safe_to_add_transition) *safe_to_add_transition = false;
return NULL;
}
ElementsKind transitioned_from_kind =
GetNextMoreGeneralFastElementsKind(from_kind, false);
// If the transition is a single step in the transition sequence, fall
// through to looking it up and returning it. If it requires several steps,
// divide and conquer.
if (transitioned_from_kind != to_kind) {
// If the transition is several steps in the lattice, divide and conquer.
Map* from_map = LookupElementsTransitionMap(transitioned_from_kind,
safe_to_add_transition);
if (from_map == NULL) return NULL;
return from_map->LookupElementsTransitionMap(to_kind,
safe_to_add_transition);
}
}
Object* descriptor_contents = GetDescriptorContents(
elements_transition_sentinel_name(), safe_to_add_transition);
if (descriptor_contents != NULL) {
Map* maybe_transition_map =
GetElementsTransitionMapFromDescriptor(descriptor_contents,
elements_kind);
to_kind);
ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
return maybe_transition_map;
}
@ -2363,29 +2367,35 @@ Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
}
MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
MaybeObject* Map::AddElementsTransition(ElementsKind to_kind,
Map* transitioned_map) {
// The map transition graph should be a tree, therefore the transition
// from SMI to FAST elements is not done directly, but by going through
// DOUBLE elements first.
if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
elements_kind == FAST_ELEMENTS) {
bool safe_to_add = true;
Map* double_map = this->LookupElementsTransitionMap(
FAST_DOUBLE_ELEMENTS, &safe_to_add);
// This method is only called when safe_to_add_transition has been found
// to be true earlier.
ASSERT(safe_to_add);
if (double_map == NULL) {
MaybeObject* maybe_map = this->CopyDropTransitions();
if (!maybe_map->To(&double_map)) return maybe_map;
double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
MaybeObject* maybe_double_transition = this->AddElementsTransition(
FAST_DOUBLE_ELEMENTS, double_map);
if (maybe_double_transition->IsFailure()) return maybe_double_transition;
}
return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
ElementsKind from_kind = elements_kind();
if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
ASSERT(IsMoreGeneralElementsKindTransition(from_kind, to_kind));
ElementsKind transitioned_from_kind =
GetNextMoreGeneralFastElementsKind(from_kind, false);
// The map transitions graph should be a tree, therefore transitions to
// ElementsKind that are not adjacent in the ElementsKind sequence are not
// done directly, but instead by going through intermediate ElementsKinds
// first.
if (to_kind != transitioned_from_kind) {
bool safe_to_add = true;
Map* intermediate_map = LookupElementsTransitionMap(
transitioned_from_kind, &safe_to_add);
// This method is only called when safe_to_add has been found to be true
// earlier.
ASSERT(safe_to_add);
if (intermediate_map == NULL) {
MaybeObject* maybe_map = CopyDropTransitions();
if (!maybe_map->To(&intermediate_map)) return maybe_map;
intermediate_map->set_elements_kind(transitioned_from_kind);
MaybeObject* maybe_transition = AddElementsTransition(
transitioned_from_kind, intermediate_map);
if (maybe_transition->IsFailure()) return maybe_transition;
}
return intermediate_map->AddElementsTransition(to_kind, transitioned_map);
}
}
bool safe_to_add_transition = true;
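
Multi-step transitions are now inserted by recursing one kind at a time, creating intermediate maps on demand so the transition graph stays a tree of single steps. A rough model using sequence indices for kinds (std::map stands in for the descriptor machinery):

#include <map>

// Kinds are sequence indices; children[k] holds the single-step transition.
struct MapNode { std::map<int, MapNode*> children; };

MapNode* AddTransition(MapNode* from, int from_kind, int to_kind,
                       MapNode* target) {
  int next = from_kind + 1;            // next more general kind
  if (next == to_kind) {               // single step: link directly
    from->children[to_kind] = target;
    return target;
  }
  MapNode*& mid = from->children[next];  // intermediate map, made on demand
  if (!mid) mid = new MapNode();
  return AddTransition(mid, next, to_kind, target);
}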
@ -2437,10 +2447,11 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
!current_map->IsUndefined() &&
!current_map->is_shared();
// Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
// Prevent long chains of DICTIONARY -> FAST_*_ELEMENTS maps caused by objects
// with elements that switch back and forth between dictionary and fast
// element mode.
if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
// element modes.
if (from_kind == DICTIONARY_ELEMENTS &&
IsFastElementsKind(to_kind)) {
safe_to_add_transition = false;
}
@ -2967,12 +2978,18 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
// Preserve the attributes of this existing property.
attributes = result->GetAttributes();
return ConvertDescriptorToField(name, value, attributes);
case CALLBACKS:
return SetPropertyWithCallback(result->GetCallbackObject(),
case CALLBACKS: {
Object* callback_object = result->GetCallbackObject();
if (callback_object->IsAccessorPair() &&
!AccessorPair::cast(callback_object)->ContainsAccessor()) {
return ConvertDescriptorToField(name, value, attributes);
}
return SetPropertyWithCallback(callback_object,
name,
value,
result->holder(),
strict_mode);
}
case INTERCEPTOR:
return SetPropertyWithInterceptor(name, value, attributes, strict_mode);
case CONSTANT_TRANSITION: {
@ -3476,8 +3493,7 @@ MaybeObject* JSObject::NormalizeElements() {
}
if (array->IsDictionary()) return array;
ASSERT(HasFastElements() ||
HasFastSmiOnlyElements() ||
ASSERT(HasFastSmiOrObjectElements() ||
HasFastDoubleElements() ||
HasFastArgumentsElements());
// Compute the effective length and allocate a new backing store.
@ -3512,8 +3528,7 @@ MaybeObject* JSObject::NormalizeElements() {
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
} else {
ASSERT(old_map->has_fast_elements() ||
old_map->has_fast_smi_only_elements());
ASSERT(old_map->has_fast_smi_or_object_elements());
value = FixedArray::cast(array)->get(i);
}
PropertyDetails details = PropertyDetails(NONE, NORMAL);
@ -4000,9 +4015,9 @@ MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
ASSERT(kind == FAST_ELEMENTS ||
ASSERT(IsFastObjectElementsKind(kind) ||
kind == DICTIONARY_ELEMENTS);
if (kind == FAST_ELEMENTS) {
if (IsFastObjectElementsKind(kind)) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: elements->length();
@ -4054,12 +4069,15 @@ bool JSObject::ReferencesObject(Object* obj) {
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
// Raw pixels and external arrays do not reference other
// objects.
break;
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
break;
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case DICTIONARY_ELEMENTS: {
FixedArray* elements = FixedArray::cast(this->elements());
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
@ -4075,7 +4093,8 @@ bool JSObject::ReferencesObject(Object* obj) {
}
// Check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS :
FAST_HOLEY_ELEMENTS;
if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
}
@ -4309,7 +4328,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
}
// Search object and it's prototype chain for callback properties.
// Search object and its prototype chain for callback properties.
void JSObject::LookupCallback(String* name, LookupResult* result) {
Heap* heap = GetHeap();
for (Object* current = this;
@ -4353,9 +4372,12 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
Object* setter,
PropertyAttributes attributes) {
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@ -4472,7 +4494,7 @@ bool JSObject::CanSetCallback(String* name) {
GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
// Check if there is an API defined callback object which prohibits
// callback overwriting in this object or it's prototype chain.
// callback overwriting in this object or its prototype chain.
// This mechanism is needed for instance in a browser setting, where
// certain accessors such as window.location should not be allowed
// to be overwritten because allowing overwriting could potentially
@ -4730,7 +4752,7 @@ MaybeObject* JSObject::DefineFastAccessor(String* name,
// If the property is not a JavaScript accessor, fall back to the slow case.
if (result.type() != CALLBACKS) return GetHeap()->null_value();
Object* callback_value = result.GetValue();
Object* callback_value = result.GetCallbackObject();
if (!callback_value->IsAccessorPair()) return GetHeap()->null_value();
AccessorPair* accessors = AccessorPair::cast(callback_value);
@ -4796,9 +4818,12 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
// Accessors overwrite previous callbacks (cf. with getters/setters).
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
@ -5915,21 +5940,15 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
int index = Search(descriptor->GetKey());
const bool replacing = (index != kNotFound);
bool keep_enumeration_index = false;
if (replacing) {
// We are replacing an existing descriptor. We keep the enumeration
// index of a visible property.
PropertyType t = GetDetails(index).type();
if (t == CONSTANT_FUNCTION ||
t == FIELD ||
t == CALLBACKS ||
t == INTERCEPTOR) {
keep_enumeration_index = true;
} else if (remove_transitions) {
// Replaced descriptor has been counted as removed if it is
// a transition that will be replaced. Adjust count in this case.
++new_size;
}
} else {
if (!replacing) {
++new_size;
} else if (!IsTransitionOnly(index)) {
// We are replacing an existing descriptor. We keep the enumeration index
// of a visible property.
keep_enumeration_index = true;
} else if (remove_transitions) {
// Replaced descriptor has been counted as removed if it is a transition
// that will be replaced. Adjust count in this case.
++new_size;
}
@ -8587,7 +8606,7 @@ void Code::Disassemble(const char* name, FILE* out) {
MaybeObject* JSObject::SetFastElementsCapacityAndLength(
int capacity,
int length,
SetFastElementsCapacityMode set_capacity_mode) {
SetFastElementsCapacitySmiMode smi_mode) {
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
@ -8598,34 +8617,40 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
if (!maybe->To(&new_elements)) return maybe;
}
// Find the new map to use for this object if there is a map change.
Map* new_map = NULL;
if (elements()->map() != heap->non_strict_arguments_elements_map()) {
// The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces
// it, or if it's allowed and the old elements array contained only SMIs.
bool has_fast_smi_only_elements =
(set_capacity_mode == kForceSmiOnlyElements) ||
((set_capacity_mode == kAllowSmiOnlyElements) &&
(elements()->map()->has_fast_smi_only_elements() ||
elements() == heap->empty_fixed_array()));
ElementsKind elements_kind = has_fast_smi_only_elements
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
if (!maybe->To(&new_map)) return maybe;
ElementsKind elements_kind = GetElementsKind();
ElementsKind new_elements_kind;
// The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it,
// or if it's allowed and the old elements array contained only SMIs.
bool has_fast_smi_elements =
(smi_mode == kForceSmiElements) ||
((smi_mode == kAllowSmiElements) && HasFastSmiElements());
if (has_fast_smi_elements) {
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_SMI_ELEMENTS;
} else {
new_elements_kind = FAST_SMI_ELEMENTS;
}
} else {
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_ELEMENTS;
} else {
new_elements_kind = FAST_ELEMENTS;
}
}
FixedArrayBase* old_elements = elements();
ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
? FAST_SMI_ONLY_ELEMENTS
: FAST_ELEMENTS;
{ MaybeObject* maybe_obj =
accessor->CopyElements(this, new_elements, to_kind);
accessor->CopyElements(this, new_elements, new_elements_kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
Map* new_map = map();
if (new_elements_kind != elements_kind) {
MaybeObject* maybe =
GetElementsTransitionMap(GetIsolate(), new_elements_kind);
if (!maybe->To(&new_map)) return maybe;
}
ValidateElements();
set_map_and_elements(new_map, new_elements);
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
@ -8637,11 +8662,9 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
GetElementsKind(), new_elements);
}
// Update the length if necessary.
if (IsJSArray()) {
JSArray::cast(this)->set_length(Smi::FromInt(length));
}
return new_elements;
}
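
The resized backing store's kind is now a two-axis decision: smi-ness comes from the capacity mode, holey-ness is preserved from the old kind. The same table as a small function (enum names are mine):

enum NewKind { SMI_PACKED, SMI_HOLEY, OBJECT_PACKED, OBJECT_HOLEY };

NewKind ResizedKind(bool force_smi, bool allow_smi, bool was_smi,
                    bool was_holey) {
  bool smi = force_smi || (allow_smi && was_smi);
  if (smi) return was_holey ? SMI_HOLEY : SMI_PACKED;
  return was_holey ? OBJECT_HOLEY : OBJECT_PACKED;
}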
@ -8659,20 +8682,28 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
if (!maybe_obj->To(&elems)) return maybe_obj;
}
ElementsKind elements_kind = GetElementsKind();
ElementsKind new_elements_kind = elements_kind;
if (IsHoleyElementsKind(elements_kind)) {
new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
} else {
new_elements_kind = FAST_DOUBLE_ELEMENTS;
}
Map* new_map;
{ MaybeObject* maybe_obj =
GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS);
GetElementsTransitionMap(heap->isolate(), new_elements_kind);
if (!maybe_obj->To(&new_map)) return maybe_obj;
}
FixedArrayBase* old_elements = elements();
ElementsKind elements_kind = GetElementsKind();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
{ MaybeObject* maybe_obj =
accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
if (maybe_obj->IsFailure()) return maybe_obj;
}
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
ValidateElements();
set_map_and_elements(new_map, elems);
} else {
FixedArray* parameter_map = FixedArray::cast(old_elements);
@ -8681,7 +8712,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
if (FLAG_trace_elements_transitions) {
PrintElementsTransition(stdout, elements_kind, old_elements,
FAST_DOUBLE_ELEMENTS, elems);
GetElementsKind(), elems);
}
if (IsJSArray()) {
@ -8961,8 +8992,10 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
@ -8973,7 +9006,8 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
}
break;
}
case FAST_DOUBLE_ELEMENTS: {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
@ -9257,7 +9291,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
Object* value,
StrictModeFlag strict_mode,
bool check_prototype) {
ASSERT(HasFastTypeElements() ||
ASSERT(HasFastSmiOrObjectElements() ||
HasFastArgumentsElements());
FixedArray* backing_store = FixedArray::cast(elements());
@ -9283,13 +9317,29 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
// Check if the length property of this object needs to be updated.
uint32_t array_length = 0;
bool must_update_array_length = false;
bool introduces_holes = true;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
introduces_holes = index > array_length;
if (index >= array_length) {
must_update_array_length = true;
array_length = index + 1;
}
} else {
introduces_holes = index >= capacity;
}
// If the array is growing, and it's not growing by a single element at the
// end, make sure that the ElementsKind is HOLEY.
ElementsKind elements_kind = GetElementsKind();
if (introduces_holes &&
IsFastElementsKind(elements_kind) &&
!IsFastHoleyElementsKind(elements_kind)) {
ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
if (maybe->IsFailure()) return maybe;
}
// Check if the capacity of the backing store needs to be increased, or if
// a transition to slow elements is necessary.
if (index >= capacity) {
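
Stores past the end now detect hole creation up front: for arrays, writing beyond the current length (appending at exactly the length is fine); for other objects, writing at or past the capacity. If holes would appear, the object first transitions to the holey variant of its kind. The predicate, in isolation:

bool IntroducesHoles(bool is_array, unsigned index,
                     unsigned array_length, unsigned capacity) {
  // Appending at exactly the array length does not create a hole;
  // any store past it (or at/past capacity, for non-arrays) does.
  return is_array ? index > array_length : index >= capacity;
}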
@ -9309,42 +9359,44 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
}
}
// Convert to fast double elements if appropriate.
if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
MaybeObject* maybe =
SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
if (maybe->IsFailure()) return maybe;
FixedDoubleArray::cast(elements())->set(index, value->Number());
ValidateElements();
return value;
}
// Change elements kind from SMI_ONLY to generic FAST if necessary.
if (HasFastSmiOnlyElements() && !value->IsSmi()) {
// Change elements kind from Smi-only to generic FAST if necessary.
if (HasFastSmiElements() && !value->IsSmi()) {
Map* new_map;
{ MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
FAST_ELEMENTS);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
}
ElementsKind kind = HasFastHoleyElements()
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
kind);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
set_map(new_map);
if (FLAG_trace_elements_transitions) {
PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
FAST_ELEMENTS, elements());
}
}
// Increase backing store capacity if that's been decided previously.
if (new_capacity != capacity) {
FixedArray* new_elements;
SetFastElementsCapacityMode set_capacity_mode =
value->IsSmi() && HasFastSmiOnlyElements()
? kAllowSmiOnlyElements
: kDontAllowSmiOnlyElements;
SetFastElementsCapacitySmiMode smi_mode =
value->IsSmi() && HasFastSmiElements()
? kAllowSmiElements
: kDontAllowSmiElements;
{ MaybeObject* maybe =
SetFastElementsCapacityAndLength(new_capacity,
array_length,
set_capacity_mode);
smi_mode);
if (!maybe->To(&new_elements)) return maybe;
}
new_elements->set(index, value);
ValidateElements();
return value;
}
// Finally, set the new element and length.
ASSERT(elements()->IsFixedArray());
backing_store->set(index, value);
@@ -9468,20 +9520,21 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
} else {
new_length = dictionary->max_number_key() + 1;
}
SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays
? kAllowSmiOnlyElements
: kDontAllowSmiOnlyElements;
SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays
? kAllowSmiElements
: kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
set_capacity_mode = kForceSmiOnlyElements;
smi_mode = kForceSmiElements;
}
MaybeObject* result = should_convert_to_fast_double_elements
? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
: SetFastElementsCapacityAndLength(new_length,
new_length,
set_capacity_mode);
smi_mode);
ValidateElements();
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -9520,27 +9573,40 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
// If the value object is not a heap number, switch to fast elements and try
// again.
bool value_is_smi = value->IsSmi();
bool introduces_holes = true;
uint32_t length = elms_length;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
introduces_holes = index > length;
} else {
introduces_holes = index >= elms_length;
}
if (!value->IsNumber()) {
Object* obj;
uint32_t length = elms_length;
if (IsJSArray()) {
CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
}
MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
elms_length,
length,
kDontAllowSmiOnlyElements);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
return SetFastElement(index,
value,
strict_mode,
check_prototype);
kDontAllowSmiElements);
if (maybe_obj->IsFailure()) return maybe_obj;
maybe_obj = SetFastElement(index, value, strict_mode, check_prototype);
if (maybe_obj->IsFailure()) return maybe_obj;
ValidateElements();
return maybe_obj;
}
double double_value = value_is_smi
? static_cast<double>(Smi::cast(value)->value())
: HeapNumber::cast(value)->value();
// If the array is growing, and it's not growing by a single element at the
// end, make sure that the ElementsKind is HOLEY.
ElementsKind elements_kind = GetElementsKind();
if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) {
ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind);
MaybeObject* maybe = TransitionElementsKind(transitioned_kind);
if (maybe->IsFailure()) return maybe;
}
// Check whether there is extra space in the fixed array.
if (index < elms_length) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
@@ -9562,13 +9628,11 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement(
int new_capacity = NewElementsCapacity(index+1);
if (!ShouldConvertToSlowElements(new_capacity)) {
ASSERT(static_cast<uint32_t>(new_capacity) > index);
Object* obj;
{ MaybeObject* maybe_obj =
SetFastDoubleElementsCapacityAndLength(new_capacity,
index + 1);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
MaybeObject* maybe_obj =
SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1);
if (maybe_obj->IsFailure()) return maybe_obj;
FixedDoubleArray::cast(elements())->set(index, double_value);
ValidateElements();
return value;
}
}
@@ -9712,10 +9776,13 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
(attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
Isolate* isolate = GetIsolate();
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return SetFastElement(index, value, strict_mode, check_prototype);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
return SetFastDoubleElement(index, value, strict_mode, check_prototype);
case EXTERNAL_PIXEL_ELEMENTS: {
ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
@@ -9806,11 +9873,19 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
ElementsKind from_kind = map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
Isolate* isolate = GetIsolate();
if ((from_kind == FAST_SMI_ONLY_ELEMENTS ||
elements() == isolate->heap()->empty_fixed_array()) &&
to_kind == FAST_ELEMENTS) {
ASSERT(from_kind != FAST_ELEMENTS);
if (elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
IsFastSmiOrObjectElementsKind(to_kind)) ||
(from_kind == FAST_DOUBLE_ELEMENTS &&
to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) {
ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
// No change is needed to the elements() buffer; the transition
// only requires a map change.
MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
Map* new_map;
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -9837,18 +9912,21 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
}
}
if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
to_kind == FAST_DOUBLE_ELEMENTS) {
if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
MaybeObject* maybe_result =
SetFastDoubleElementsCapacityAndLength(capacity, length);
if (maybe_result->IsFailure()) return maybe_result;
ValidateElements();
return this;
}
if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
capacity, length, kDontAllowSmiOnlyElements);
capacity, length, kDontAllowSmiElements);
if (maybe_result->IsFailure()) return maybe_result;
ValidateElements();
return this;
}
@@ -9862,10 +9940,14 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
// static
bool Map::IsValidElementsTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return
(from_kind == FAST_SMI_ONLY_ELEMENTS &&
(to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
(from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
// Transitions can't go backwards.
if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
return false;
}
// Transitions from HOLEY -> PACKED are not allowed.
return !IsFastHoleyElementsKind(from_kind) ||
IsFastHoleyElementsKind(to_kind);
}
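
The rewritten predicate composes two rules: a kind may only become more
general (Smi -> double -> object as representations, and packed -> holey
within a representation), and holeyness, once acquired, is never dropped. A
minimal standalone model of those two checks, assuming that ordering (the
names below are illustrative, not V8's):

#include <cassert>

enum Kind { SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT };

static bool IsHoley(Kind k) {
  return k == HOLEY_SMI || k == HOLEY_DOUBLE || k == HOLEY_OBJECT;
}

// Representation generality: Smi < double < object.
static int Rank(Kind k) {
  switch (k) {
    case SMI: case HOLEY_SMI: return 0;
    case DOUBLE: case HOLEY_DOUBLE: return 1;
    default: return 2;
  }
}

static bool IsMoreGeneral(Kind from, Kind to) {
  if (Rank(to) != Rank(from)) return Rank(to) > Rank(from);
  return IsHoley(to) && !IsHoley(from);  // same representation: packed -> holey
}

static bool IsValidTransition(Kind from, Kind to) {
  if (!IsMoreGeneral(from, to)) return false;  // transitions can't go backwards
  return !IsHoley(from) || IsHoley(to);        // and HOLEY -> PACKED is banned
}

int main() {
  assert(IsValidTransition(SMI, HOLEY_DOUBLE));
  assert(!IsValidTransition(HOLEY_SMI, DOUBLE));  // would lose holeyness
  assert(!IsValidTransition(OBJECT, SMI));        // backwards
  return 0;
}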
@@ -9956,8 +10038,10 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
break;
}
// Fall through.
case FAST_SMI_ONLY_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
backing_store = FixedArray::cast(backing_store_base);
*capacity = backing_store->length();
for (int i = 0; i < *capacity; ++i) {
@@ -9971,7 +10055,8 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) {
*used = dictionary->NumberOfElements();
break;
}
case FAST_DOUBLE_ELEMENTS: {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
*capacity = elms->length();
for (int i = 0; i < *capacity; i++) {
@@ -10241,16 +10326,19 @@ bool JSObject::HasRealElementProperty(uint32_t index) {
if (this->IsStringObjectWithCharacterAt(index)) return true;
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
uint32_t length = IsJSArray() ?
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
return (index < length) &&
!FixedArray::cast(elements())->get(index)->IsTheHole();
}
case FAST_DOUBLE_ELEMENTS: {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -10450,7 +10538,7 @@ int JSObject::NumberOfLocalElements(PropertyAttributes filter) {
int JSObject::NumberOfEnumElements() {
// Fast case for objects with no elements.
if (!IsJSValue() && HasFastElements()) {
if (!IsJSValue() && HasFastObjectElements()) {
uint32_t length = IsJSArray() ?
static_cast<uint32_t>(
Smi::cast(JSArray::cast(this)->length())->value()) :
@@ -10466,8 +10554,10 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
PropertyAttributes filter) {
int counter = 0;
switch (GetElementsKind()) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedArray::cast(elements())->length();
@@ -10482,7 +10572,8 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
ASSERT(!storage || storage->length() >= counter);
break;
}
case FAST_DOUBLE_ELEMENTS: {
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedDoubleArray::cast(elements())->length();
@@ -11415,10 +11506,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
// Convert to fast elements.
Object* obj;
{ MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
FAST_ELEMENTS);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
FAST_HOLEY_ELEMENTS);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
Map* new_map = Map::cast(obj);
PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED;
@@ -11429,9 +11519,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
}
FixedArray* fast_elements = FixedArray::cast(new_array);
dict->CopyValuesTo(fast_elements);
ValidateElements();
set_map(new_map);
set_elements(fast_elements);
set_map_and_elements(new_map, fast_elements);
} else if (HasExternalArrayElements()) {
// External arrays cannot have holes or undefined elements.
return Smi::FromInt(ExternalArray::cast(elements())->length());
@@ -11441,7 +11531,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
}
ASSERT(HasFastTypeElements() || HasFastDoubleElements());
ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements());
// Collect holes at the end, undefined before that and the rest at the
// start, and return the number of non-hole, non-undefined values.

119
deps/v8/src/objects.h

@@ -30,6 +30,7 @@
#include "allocation.h"
#include "builtins.h"
#include "elements-kind.h"
#include "list.h"
#include "property-details.h"
#include "smart-array-pointer.h"
@@ -131,40 +132,6 @@
namespace v8 {
namespace internal {
enum ElementsKind {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
FAST_SMI_ONLY_ELEMENTS,
// The "fast" kind for tagged values. Must be second to make it possible to
// efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
// together at once.
FAST_ELEMENTS,
// The "fast" kind for unwrapped, non-tagged double values.
FAST_DOUBLE_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
NON_STRICT_ARGUMENTS_ELEMENTS,
// The "fast" kind for external arrays
EXTERNAL_BYTE_ELEMENTS,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
EXTERNAL_SHORT_ELEMENTS,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
EXTERNAL_INT_ELEMENTS,
EXTERNAL_UNSIGNED_INT_ELEMENTS,
EXTERNAL_FLOAT_ELEMENTS,
EXTERNAL_DOUBLE_ELEMENTS,
EXTERNAL_PIXEL_ELEMENTS,
// Derived constants from ElementsKind
FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
};
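
The enum now lives in the new elements-kind.h (included above). Its shape is
presumably the old list with a HOLEY twin interleaved after each fast kind;
an abbreviated sketch of that layout (external-array kinds omitted, names
taken from their uses elsewhere in this diff, not the verbatim header):

enum ElementsKind {
  // Fast kinds: each packed kind is immediately followed by its holey twin.
  FAST_SMI_ELEMENTS,
  FAST_HOLEY_SMI_ELEMENTS,
  FAST_ELEMENTS,
  FAST_HOLEY_ELEMENTS,
  FAST_DOUBLE_ELEMENTS,
  FAST_HOLEY_DOUBLE_ELEMENTS,
  // The "slow" kinds.
  DICTIONARY_ELEMENTS,
  NON_STRICT_ARGUMENTS_ELEMENTS,
  // The most general fast kind; TransitionElementsKind asserts against it.
  TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS
};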
enum CompareMapMode {
REQUIRE_EXACT_MAP,
ALLOW_ELEMENT_TRANSITION_MAPS
@@ -175,13 +142,6 @@ enum KeyedAccessGrowMode {
ALLOW_JSARRAY_GROWTH
};
const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
void PrintElementsKind(FILE* out, ElementsKind kind);
inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
// Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
@@ -1510,13 +1470,19 @@ class JSObject: public JSReceiver {
MUST_USE_RESULT inline MaybeObject* ResetElements();
inline ElementsKind GetElementsKind();
inline ElementsAccessor* GetElementsAccessor();
inline bool HasFastSmiOnlyElements();
inline bool HasFastElements();
// Returns if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT
// elements. TODO(danno): Rename HasFastTypeElements to HasFastElements() and
// HasFastElements to HasFastObjectElements.
inline bool HasFastTypeElements();
// Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind.
inline bool HasFastSmiElements();
// Returns true if an object has elements of FAST_ELEMENTS ElementsKind.
inline bool HasFastObjectElements();
// Returns true if an object has elements of FAST_ELEMENTS or
// FAST_SMI_ONLY_ELEMENTS.
inline bool HasFastSmiOrObjectElements();
// Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
// ElementsKind.
inline bool HasFastDoubleElements();
// Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS
// ElementsKind.
inline bool HasFastHoleyElements();
inline bool HasNonStrictArgumentsElements();
inline bool HasDictionaryElements();
inline bool HasExternalPixelElements();
@@ -1719,7 +1685,7 @@ class JSObject: public JSReceiver {
static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
inline void ValidateSmiOnlyElements();
inline void ValidateElements();
// Makes sure that this object can contain HeapObject as elements.
MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
@@ -1731,6 +1697,7 @@
EnsureElementsMode mode);
MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
FixedArrayBase* elements,
uint32_t length,
EnsureElementsMode mode);
MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
Arguments* arguments,
@@ -1829,10 +1796,10 @@
MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
uint32_t index);
enum SetFastElementsCapacityMode {
kAllowSmiOnlyElements,
kForceSmiOnlyElements,
kDontAllowSmiOnlyElements
enum SetFastElementsCapacitySmiMode {
kAllowSmiElements,
kForceSmiElements,
kDontAllowSmiElements
};
// Replace the elements' backing store with fast elements of the given
@@ -1841,7 +1808,7 @@
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
int capacity,
int length,
SetFastElementsCapacityMode set_capacity_mode);
SetFastElementsCapacitySmiMode smi_mode);
MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
int capacity,
int length);
@@ -4647,17 +4614,21 @@ class Map: public HeapObject {
}
// Tells whether the instance has fast elements that are only Smis.
inline bool has_fast_smi_only_elements() {
return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
inline bool has_fast_smi_elements() {
return IsFastSmiElementsKind(elements_kind());
}
// Tells whether the instance has fast elements.
inline bool has_fast_elements() {
return elements_kind() == FAST_ELEMENTS;
inline bool has_fast_object_elements() {
return IsFastObjectElementsKind(elements_kind());
}
inline bool has_fast_smi_or_object_elements() {
return IsFastSmiOrObjectElementsKind(elements_kind());
}
inline bool has_fast_double_elements() {
return elements_kind() == FAST_DOUBLE_ELEMENTS;
return IsFastDoubleElementsKind(elements_kind());
}
inline bool has_non_strict_arguments_elements() {
@@ -4855,6 +4826,14 @@
Handle<Map> FindTransitionedMap(MapHandleList* candidates);
Map* FindTransitionedMap(MapList* candidates);
// Zaps the contents of backing data structures in debug mode. Note that the
// heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
// holding weak references when incremental marking is used, because it also
// iterates over objects that are otherwise unreachable.
#ifdef DEBUG
void ZapInstanceDescriptors();
void ZapPrototypeTransitions();
#endif
// Dispatched behavior.
#ifdef OBJECT_PRINT
@@ -4945,25 +4924,31 @@
// Bit positions for bit field 2
static const int kIsExtensible = 0;
static const int kFunctionWithPrototype = 1;
static const int kStringWrapperSafeForDefaultValueOf = 2;
static const int kAttachedToSharedFunctionInfo = 3;
static const int kStringWrapperSafeForDefaultValueOf = 1;
static const int kAttachedToSharedFunctionInfo = 2;
// No bits can be used after kElementsKindShift; they are all reserved for
// storing ElementsKind.
static const int kElementsKindShift = 4;
static const int kElementsKindBitCount = 4;
static const int kElementsKindShift = 3;
static const int kElementsKindBitCount = 5;
// Derived values from bit field 2
static const int kElementsKindMask = (-1 << kElementsKindShift) &
((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
(FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
static const int8_t kMaximumBitField2FastSmiElementValue =
static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) <<
Map::kElementsKindShift) - 1;
static const int8_t kMaximumBitField2FastHoleyElementValue =
static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) <<
Map::kElementsKindShift) - 1;
static const int8_t kMaximumBitField2FastHoleySmiElementValue =
static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
Map::kElementsKindShift) - 1;
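
With kElementsKindShift lowered to 3 and kElementsKindBitCount widened to 5,
the derived mask selects bits 3..7 of bit field 2. A quick standalone check
of the formula above:

#include <cassert>

int main() {
  const int kElementsKindShift = 3;
  const int kElementsKindBitCount = 5;
  // Mirrors V8's expression; assumes two's-complement behavior of -1 << n.
  const int kElementsKindMask = (-1 << kElementsKindShift) &
      ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
  assert(kElementsKindMask == 0xF8);  // 0b11111000: bits 3 through 7
  return 0;
}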
// Bit positions for bit field 3
static const int kIsShared = 0;
static const int kFunctionWithPrototype = 1;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@@ -7243,6 +7228,10 @@ class SeqAsciiString: public SeqString {
unsigned* offset,
unsigned chars);
#ifdef DEBUG
void SeqAsciiStringVerify();
#endif
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
};

20
deps/v8/src/parser.cc

@@ -3767,10 +3767,12 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
Handle<FixedArray> object_literals =
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
ElementsKind elements_kind = FAST_SMI_ELEMENTS;
bool has_only_undefined_values = true;
bool has_hole_values = false;
// Fill in the literals.
Heap* heap = isolate()->heap();
bool is_simple = true;
int depth = 1;
for (int i = 0, n = values->length(); i < n; i++) {
@@ -3779,12 +3781,18 @@
depth = m_literal->depth() + 1;
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsUndefined()) {
if (boilerplate_value->IsTheHole()) {
has_hole_values = true;
object_literals->set_the_hole(i);
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
double_literals->set_the_hole(i);
}
} else if (boilerplate_value->IsUndefined()) {
is_simple = false;
object_literals->set(i, Smi::FromInt(0));
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
double_literals->set(i, 0);
}
} else {
// Examine each literal element, and adjust the ElementsKind if the
// literal element is not of a type that can be stored in the current
@@ -3794,7 +3802,7 @@
// ultimately end up in FAST_ELEMENTS.
has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
if (elements_kind == FAST_SMI_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
// FAST_ELEMENTS is required.
if (!boilerplate_value->IsSmi()) {
@@ -3842,7 +3850,7 @@
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
elements_kind != FAST_DOUBLE_ELEMENTS) {
object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
object_literals->set_map(heap->fixed_cow_array_map());
}
Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
@@ -3854,6 +3862,10 @@
Handle<FixedArray> literals =
isolate()->factory()->NewFixedArray(2, TENURED);
if (has_hole_values || !FLAG_packed_arrays) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
literals->set(0, Smi::FromInt(elements_kind));
literals->set(1, *element_values);
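
The boilerplate description is therefore a two-slot fixed array: slot 0 holds
the ElementsKind as a Smi (widened to its holey variant when the literal
contains holes or --packed-arrays is disabled), slot 1 the literal values. A
simplified standalone model of the per-element kind widening done in the loop
above (the names are illustrative, not the real V8 helpers):

#include <cassert>
#include <vector>

enum Kind { SMI, DOUBLE, OBJECT };
struct Value { bool is_hole; bool is_smi; bool is_number; };

static void Classify(const std::vector<Value>& values,
                     Kind* kind, bool* has_holes) {
  *kind = SMI;
  *has_holes = false;
  for (const Value& v : values) {
    if (v.is_hole) { *has_holes = true; continue; }
    if (v.is_smi) continue;                              // fits any fast kind
    if (v.is_number && *kind != OBJECT) *kind = DOUBLE;  // Smi -> double
    if (!v.is_number) *kind = OBJECT;                    // everything else
  }
}

int main() {
  // [1, /* hole */, 2.5] classifies as holey double storage.
  std::vector<Value> literal = {{false, true, true},
                                {true, false, false},
                                {false, false, true}};
  Kind kind;
  bool holey;
  Classify(literal, &kind, &holey);
  assert(kind == DOUBLE && holey);
  return 0;
}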

33
deps/v8/src/platform-posix.cc

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -422,9 +422,9 @@ Socket* POSIXSocket::Accept() const {
}
int socket;
do
do {
socket = accept(socket_, NULL, NULL);
while (socket == -1 && errno == EINTR);
} while (socket == -1 && errno == EINTR);
if (socket == -1) {
return NULL;
@@ -452,10 +452,9 @@ bool POSIXSocket::Connect(const char* host, const char* port) {
}
// Connect.
do
do {
status = connect(socket_, result->ai_addr, result->ai_addrlen);
while (status == -1 && errno == EINTR);
} while (status == -1 && errno == EINTR);
freeaddrinfo(result);
return status == 0;
}
@@ -474,33 +473,29 @@ bool POSIXSocket::Shutdown() {
int POSIXSocket::Send(const char* data, int len) const {
int written;
for (written = 0; written < len; /* empty */) {
if (len <= 0) return 0;
int written = 0;
while (written < len) {
int status = send(socket_, data + written, len - written, 0);
if (status == 0) {
break;
} else if (status > 0) {
written += status;
} else if (errno == EINTR) {
/* interrupted by signal, retry */
} else {
return -1;
} else if (errno != EINTR) {
return 0;
}
}
return written;
}
int POSIXSocket::Receive(char* data, int len) const {
if (len <= 0) return 0;
int status;
do
do {
status = recv(socket_, data, len, 0);
while (status == -1 && errno == EINTR);
return status;
} while (status == -1 && errno == EINTR);
return (status < 0) ? 0 : status;
}
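
Accept, Connect, Send and Receive now all follow the standard POSIX pattern
of retrying a call that failed with EINTR because a signal arrived. The same
pattern could be factored into a helper (a sketch; V8 writes the do/while out
inline at each call site):

#include <cerrno>

// Repeats a blocking call while it keeps failing with EINTR.
template <typename Call>
static int RetryAfterSignal(Call call) {
  int result;
  do {
    result = call();
  } while (result == -1 && errno == EINTR);
  return result;
}

// Usage, e.g. for accept():
//   int fd = RetryAfterSignal([&]() { return accept(socket_, NULL, NULL); });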

18
deps/v8/src/platform-win32.cc

@@ -1848,14 +1848,26 @@ bool Win32Socket::Shutdown() {
int Win32Socket::Send(const char* data, int len) const {
int status = send(socket_, data, len, 0);
return status;
if (len <= 0) return 0;
int written = 0;
while (written < len) {
int status = send(socket_, data + written, len - written, 0);
if (status == 0) {
break;
} else if (status > 0) {
written += status;
} else {
return 0;
}
}
return written;
}
int Win32Socket::Receive(char* data, int len) const {
if (len <= 0) return 0;
int status = recv(socket_, data, len, 0);
return status;
return (status == SOCKET_ERROR) ? 0 : status;
}

3
deps/v8/src/platform.h

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -653,6 +653,7 @@ class Socket {
virtual bool Shutdown() = 0;
// Data Transmission
// Return 0 on failure.
virtual int Send(const char* data, int len) const = 0;
virtual int Receive(char* data, int len) const = 0;
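
Taken together, the socket changes give Send and Receive a uniform contract
across platforms: non-positive lengths are rejected up front, failures now
report 0 rather than -1, and Send loops until the whole buffer is written or
the peer disconnects. A caller can therefore test for complete delivery
directly (a hypothetical caller against the interface above, not code from
this diff):

// Returns true only if every byte was handed to the socket.
static bool SendAll(v8::internal::Socket* socket, const char* data, int len) {
  return socket->Send(data, len) == len;
}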

Some files were not shown because too many files changed in this diff
