
Upgrade v8 to 3.9.17

v0.9.1-release
isaacs committed 13 years ago
commit e4fc2cbfd3
Changed files (lines changed in parentheses):

1. deps/v8/ChangeLog (80)
2. deps/v8/Makefile (4)
3. deps/v8/SConstruct (7)
4. deps/v8/build/common.gypi (3)
5. deps/v8/include/v8-profiler.h (11)
6. deps/v8/include/v8.h (23)
7. deps/v8/src/SConscript (2)
8. deps/v8/src/allocation.h (2)
9. deps/v8/src/api.cc (28)
10. deps/v8/src/arm/assembler-arm.cc (10)
11. deps/v8/src/arm/builtins-arm.cc (7)
12. deps/v8/src/arm/code-stubs-arm.cc (78)
13. deps/v8/src/arm/codegen-arm.cc (13)
14. deps/v8/src/arm/deoptimizer-arm.cc (126)
15. deps/v8/src/arm/full-codegen-arm.cc (69)
16. deps/v8/src/arm/ic-arm.cc (8)
17. deps/v8/src/arm/lithium-arm.cc (443)
18. deps/v8/src/arm/lithium-arm.h (55)
19. deps/v8/src/arm/lithium-codegen-arm.cc (192)
20. deps/v8/src/arm/lithium-codegen-arm.h (2)
21. deps/v8/src/arm/regexp-macro-assembler-arm.cc (2)
22. deps/v8/src/arm/stub-cache-arm.cc (100)
23. deps/v8/src/assembler.cc (11)
24. deps/v8/src/assembler.h (3)
25. deps/v8/src/ast.cc (131)
26. deps/v8/src/ast.h (188)
27. deps/v8/src/bootstrapper.cc (2)
28. deps/v8/src/builtins.cc (56)
29. deps/v8/src/codegen.h (9)
30. deps/v8/src/compiler.cc (2)
31. deps/v8/src/d8.gyp (3)
32. deps/v8/src/date.cc (384)
33. deps/v8/src/date.h (260)
34. deps/v8/src/date.js (669)
35. deps/v8/src/debug-debugger.js (5)
36. deps/v8/src/deoptimizer.cc (40)
37. deps/v8/src/deoptimizer.h (23)
38. deps/v8/src/elements.cc (659)
39. deps/v8/src/elements.h (88)
40. deps/v8/src/execution.cc (12)
41. deps/v8/src/execution.h (9)
42. deps/v8/src/flag-definitions.h (28)
43. deps/v8/src/frames-inl.h (2)
44. deps/v8/src/frames.cc (49)
45. deps/v8/src/frames.h (5)
46. deps/v8/src/full-codegen.cc (73)
47. deps/v8/src/full-codegen.h (3)
48. deps/v8/src/global-handles.cc (3)
49. deps/v8/src/global-handles.h (8)
50. deps/v8/src/globals.h (16)
51. deps/v8/src/heap-inl.h (9)
52. deps/v8/src/heap.cc (72)
53. deps/v8/src/heap.h (20)
54. deps/v8/src/hydrogen-instructions.cc (168)
55. deps/v8/src/hydrogen-instructions.h (155)
56. deps/v8/src/hydrogen.cc (483)
57. deps/v8/src/hydrogen.h (80)
58. deps/v8/src/ia32/builtins-ia32.cc (7)
59. deps/v8/src/ia32/code-stubs-ia32.cc (135)
60. deps/v8/src/ia32/code-stubs-ia32.h (3)
61. deps/v8/src/ia32/codegen-ia32.cc (48)
62. deps/v8/src/ia32/deoptimizer-ia32.cc (139)
63. deps/v8/src/ia32/full-codegen-ia32.cc (158)
64. deps/v8/src/ia32/lithium-codegen-ia32.cc (208)
65. deps/v8/src/ia32/lithium-codegen-ia32.h (2)
66. deps/v8/src/ia32/lithium-ia32.cc (60)
67. deps/v8/src/ia32/lithium-ia32.h (42)
68. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (2)
69. deps/v8/src/ia32/stub-cache-ia32.cc (96)
70. deps/v8/src/ic.cc (16)
71. deps/v8/src/interface.cc (226)
72. deps/v8/src/interface.h (156)
73. deps/v8/src/isolate.cc (8)
74. deps/v8/src/isolate.h (15)
75. deps/v8/src/jsregexp.cc (12)
76. deps/v8/src/jsregexp.h (4)
77. deps/v8/src/lithium-allocator.cc (16)
78. deps/v8/src/lithium.h (9)
79. deps/v8/src/macros.py (40)
80. deps/v8/src/mark-compact-inl.h (4)
81. deps/v8/src/mark-compact.cc (4)
82. deps/v8/src/mark-compact.h (9)
83. deps/v8/src/messages.js (30)
84. deps/v8/src/mips/builtins-mips.cc (19)
85. deps/v8/src/mips/code-stubs-mips.cc (53)
86. deps/v8/src/mips/codegen-mips.cc (13)
87. deps/v8/src/mips/deoptimizer-mips.cc (157)
88. deps/v8/src/mips/full-codegen-mips.cc (78)
89. deps/v8/src/mips/ic-mips.cc (13)
90. deps/v8/src/mips/lithium-codegen-mips.cc (236)
91. deps/v8/src/mips/lithium-codegen-mips.h (2)
92. deps/v8/src/mips/lithium-mips.cc (461)
93. deps/v8/src/mips/lithium-mips.h (78)
94. deps/v8/src/mips/macro-assembler-mips.cc (42)
95. deps/v8/src/mips/macro-assembler-mips.h (4)
96. deps/v8/src/mips/regexp-macro-assembler-mips.cc (2)
97. deps/v8/src/mips/stub-cache-mips.cc (131)
98. deps/v8/src/objects-debug.cc (50)
99. deps/v8/src/objects-inl.h (29)
100. deps/v8/src/objects-printer.cc (27)

deps/v8/ChangeLog (80)

@ -1,6 +1,74 @@
2012-03-12: Version 3.9.17
Fixed VFP detection through compiler defines. (issue 1996)
Add Code-related fields to postmortem metadata.
Performance and stability improvements on all platforms.
2012-03-09: Version 3.9.16
Added basic interface inference for modules (behind the --harmony flag).
Added Object.is, Number.isFinite, Number.isNaN.
Updated the Unicode tables to Unicode version 6.1.0.
Performance and stability improvements on all platforms.
2012-03-06: Version 3.9.15
Fix the heap profiler crash caused by memory layout changes between
passes.
Fix Error.prototype.toString to throw TypeError. (issue 1980)
Fix double-rounding in strtod for MinGW. (issue 1062)
Fix corrupted snapshot serialization on ia32. (Chromium issue v8/1985)
Performance and stability improvements on all platforms.
2012-03-01: Version 3.9.14
Performance and stability improvements on all platforms.
2012-02-29: Version 3.9.13
Added code kind check before preparing for OSR. (issue 1900, 115073)
Fixed issue 1802: Pass zone explicitly to zone-allocation on x64 and
ARM.
Ported string construct stub to x64. (issue 849)
Performance and stability improvements on all platforms.
2012-02-28: Version 3.9.12
Fixed the negative lookup stub to handle deleted entries in a
dictionary. (issue 1964)
Added a new API where the host can supply a callback function. The
callback function can resolve the location of a return address on stack
to the location where a return-address rewriting profiler stashed the
original return address.
Fixed Chromium issue http://crbug.com/115646: When compiling for-in
pass correct context value to the increment instruction.
Fixed issue 1853: Update breakpoints set with partial file name after
compile.
2012-02-27: Version 3.9.11
Make 'module' a context-sensitive keyword (V8 issue 1957).
Made 'module' a context-sensitive keyword (V8 issue 1957).
2012-02-24: Version 3.9.10
@ -55,11 +123,11 @@
2012-02-14: Version 3.9.6
Fix template-related linker error. (issue 1936)
Fixed template-related linker error. (issue 1936)
Allow inlining of functions containing object literals. (issue 1322)
Allowed inlining of functions containing object literals. (issue 1322)
Add --call-graph-size option to tickprocessor. (issue 1937)
Added --call-graph-size option to tickprocessor. (issue 1937)
Heap Snapshot maximum size limit is too low for really big apps. At the
moment the limit is 256MB. (Chromium issue 113015)
@ -93,7 +161,7 @@
2012-02-06: Version 3.9.2
Add timestamp to --trace-gc output. (issue 1932)
Added timestamp to --trace-gc output. (issue 1932)
Heap profiler reports implicit references.
@ -115,7 +183,7 @@
2012-02-01: Version 3.9.0
Reduce memory use immediately after starting V8.
Reduced memory use immediately after starting V8.
Stability fixes and performance improvements on all platforms.

deps/v8/Makefile (4)

@ -75,6 +75,10 @@ ifeq ($(vfp3), off)
else
GYPFLAGS += -Dv8_can_use_vfp_instructions=true
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
GYPFLAGS += -Dv8_enable_debugger_support=0
endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)

deps/v8/SConstruct (7)

@ -300,7 +300,12 @@ V8_EXTRA_FLAGS = {
'-Wnon-virtual-dtor']
},
'os:win32': {
'WARNINGFLAGS': ['-pedantic', '-Wno-long-long', '-Wno-pedantic-ms-format']
'WARNINGFLAGS': ['-pedantic',
'-Wno-long-long',
'-Wno-pedantic-ms-format'],
'library:shared': {
'LIBS': ['winmm', 'ws2_32']
}
},
'os:linux': {
'WARNINGFLAGS': ['-pedantic'],

deps/v8/build/common.gypi (3)

@ -305,7 +305,7 @@
'cflags': [ '-I/usr/pkg/include' ],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wno-unused-parameter',
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
@ -352,6 +352,7 @@
}], # OS=="mac"
['OS=="win"', {
'msvs_configuration_attributes': {
'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},

deps/v8/include/v8-profiler.h (11)

@ -284,14 +284,8 @@ class V8EXPORT HeapGraphNode {
* the objects that are reachable only from this object. In other
* words, the size of memory that will be reclaimed having this node
* collected.
*
* Exact retained size calculation has O(N) (number of nodes)
* computational complexity, while approximate has O(1). It is
* assumed that initially heap profiling tools provide approximate
* sizes for all nodes, and then exact sizes are calculated for the
* most 'interesting' nodes.
*/
int GetRetainedSize(bool exact) const;
int GetRetainedSize() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
@ -436,6 +430,9 @@ class V8EXPORT HeapProfiler {
* handle.
*/
static const uint16_t kPersistentHandleNoClassId = 0;
/** Returns the number of currently existing persistent handles. */
static int GetPersistentHandleCount();
};
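Both profiler-facing changes above are visible to embedders: HeapGraphNode::GetRetainedSize() drops its bool "exact" parameter, and HeapProfiler::GetPersistentHandleCount() is new. A minimal sketch of the updated calls (the ReportNode function and its printf reporting are illustrative, not part of this commit):

```cpp
#include <cstdio>
#include <v8.h>
#include <v8-profiler.h>

// Sketch only: 'node' would come from a HeapSnapshot the embedder has taken.
void ReportNode(const v8::HeapGraphNode* node) {
  // GetRetainedSize() no longer takes the bool 'exact' argument; a single
  // retained-size value is returned.
  int retained = node->GetRetainedSize();

  // New in this release: the number of currently existing persistent handles.
  int persistent_handles = v8::HeapProfiler::GetPersistentHandleCount();

  printf("retained=%d persistent handles=%d\n", retained, persistent_handles);
}
```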

deps/v8/include/v8.h (23)

@ -2857,6 +2857,20 @@ class V8EXPORT StartupDataDecompressor { // NOLINT
typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
/**
* ReturnAddressLocationResolver is used as a callback function when v8 is
* resolving the location of a return address on the stack. Profilers that
* change the return address on the stack can use this to resolve the stack
location to wherever the profiler stashed the original return address.
* When invoked, return_addr_location will point to a location on stack where
* a machine return address resides, this function should return either the
* same pointer, or a pointer to the profiler's copy of the original return
* address.
*/
typedef uintptr_t (*ReturnAddressLocationResolver)(
uintptr_t return_addr_location);
/**
* Interface for iterating through all external resources in the heap.
*/
@ -3110,6 +3124,13 @@ class V8EXPORT V8 {
*/
static void SetEntropySource(EntropySource source);
/**
* Allows the host application to provide a callback that allows v8 to
* cooperate with a profiler that rewrites return addresses on stack.
*/
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver);
/**
* Adjusts the amount of registered external memory. Used to give
* V8 an indication of the amount of externally allocated memory
@ -3850,7 +3871,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kJSObjectType = 0xa9;
static const int kJSObjectType = 0xaa;
static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85;
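The ReturnAddressLocationResolver typedef and V8::SetReturnAddressLocationResolver() above form the new hook described in the 3.9.12 ChangeLog entry. A minimal sketch of how a return-address-rewriting profiler might register it; LookupStashedAddress() is a hypothetical profiler-side lookup, not part of this commit:

```cpp
#include <cstdint>
#include <v8.h>

// Hypothetical profiler-side lookup: returns the location of the original
// return address if the profiler rewrote this stack slot, or 0 otherwise.
extern uintptr_t LookupStashedAddress(uintptr_t return_addr_location);

static uintptr_t ResolveReturnAddress(uintptr_t return_addr_location) {
  uintptr_t stashed = LookupStashedAddress(return_addr_location);
  // Per the header comment: return either the same pointer, or a pointer to
  // the profiler's copy of the original return address.
  return stashed != 0 ? stashed : return_addr_location;
}

void InstallResolver() {
  v8::V8::SetReturnAddressLocationResolver(ResolveReturnAddress);
}
```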

deps/v8/src/SConscript (2)

@ -59,6 +59,7 @@ SOURCES = {
counters.cc
cpu-profiler.cc
data-flow.cc
date.cc
dateparser.cc
debug-agent.cc
debug.cc
@ -84,6 +85,7 @@ SOURCES = {
hydrogen-instructions.cc
ic.cc
incremental-marking.cc
interface.cc
inspector.cc
interpreter-irregexp.cc
isolate.cc

deps/v8/src/allocation.h (2)

@ -80,7 +80,7 @@ class AllStatic {
template <typename T>
T* NewArray(int size) {
T* NewArray(size_t size) {
T* result = new T[size];
if (result == NULL) Malloced::FatalProcessOutOfMemory();
return result;

deps/v8/src/api.cc (28)

@ -525,7 +525,8 @@ Extension::Extension(const char* name,
int source_length)
: name_(name),
source_length_(source_length >= 0 ?
source_length : (source ? strlen(source) : 0)),
source_length :
(source ? static_cast<int>(strlen(source)) : 0)),
source_(source, source_length_),
dep_count_(dep_count),
deps_(deps),
@ -4026,6 +4027,12 @@ void v8::V8::SetEntropySource(EntropySource source) {
}
void v8::V8::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver) {
i::V8::SetReturnAddressLocationResolver(return_address_resolver);
}
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@ -4728,8 +4735,8 @@ double v8::Date::NumberValue() const {
if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
LOG_API(isolate, "Date::NumberValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
return jsvalue->value()->Number();
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
return jsdate->value()->Number();
}
@ -4740,8 +4747,10 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
ENTER_V8(isolate);
isolate->date_cache()->ResetDateCache();
i::HandleScope scope(isolate);
// Get the function ResetDateCache (defined in date-delay.js).
// Get the function ResetDateCache (defined in date.js).
i::Handle<i::String> func_name_str =
isolate->factory()->LookupAsciiSymbol("ResetDateCache");
i::MaybeObject* result =
@ -5867,10 +5876,10 @@ int HeapGraphNode::GetSelfSize() const {
}
int HeapGraphNode::GetRetainedSize(bool exact) const {
int HeapGraphNode::GetRetainedSize() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->RetainedSize(exact);
return ToInternal(this)->retained_size();
}
@ -5972,7 +5981,7 @@ const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
}
@ -6065,6 +6074,11 @@ void HeapProfiler::DefineWrapperClass(uint16_t class_id,
}
int HeapProfiler::GetPersistentHandleCount() {
i::Isolate* isolate = i::Isolate::Current();
return isolate->global_handles()->NumberOfGlobalHandles();
}
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;

deps/v8/src/arm/assembler-arm.cc (10)

@ -66,11 +66,13 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
// generation even when generating snapshots. This won't work for cross
// compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
// generation even when generating snapshots. ARMv7 and hardware floating
// point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
&& !defined(__SOFTFP__)
answer |= 1u << VFP3 | 1u << ARMv7;
#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
#endif // def __arm__
return answer;

deps/v8/src/arm/builtins-arm.cc (7)

@ -978,6 +978,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
// Store offset of return address for deoptimizer.
if (!is_api_function && !count_constructions) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
// r0: result
// sp[0]: receiver
@ -1740,7 +1745,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
__ Call(r3);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Jump(lr);

deps/v8/src/arm/code-stubs-arm.cc (78)

@ -5930,8 +5930,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
__ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
__ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
// Update instance type.
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
@ -5969,8 +5969,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
__ bind(&set_slice_header);
__ mov(r3, Operand(r3, LSL, 1));
__ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
__ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ jmp(&return_r0);
__ bind(&copy_routine);
@ -6560,15 +6560,15 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ b(ne, &maybe_undefined1);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ b(ne, &maybe_undefined2);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or VFP3 is unsupported.
@ -6592,14 +6592,28 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(r0, Operand(LESS), LeaveCC, lt);
__ mov(r0, Operand(GREATER), LeaveCC, gt);
__ Ret();
__ bind(&unordered);
}
__ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
}
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
__ b(eq, &unordered);
}
__ bind(&miss);
GenerateMiss(masm);
}
@ -6647,6 +6661,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
// Registers containing left and right operands respectively.
Register left = r1;
Register right = r0;
@ -6680,28 +6696,39 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kSymbolTag != 0);
__ and_(tmp3, tmp1, Operand(tmp2));
__ tst(tmp3, Operand(kIsSymbolMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
__ Ret(ne);
if (equality) {
ASSERT(GetCondition() == eq);
STATIC_ASSERT(kSymbolTag != 0);
__ and_(tmp3, tmp1, Operand(tmp2));
__ tst(tmp3, Operand(kIsSymbolMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
__ Ret(ne);
}
// Check that both strings are sequential ASCII.
Label runtime;
__ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
&runtime);
__ JumpIfBothInstanceTypesAreNotSequentialAscii(
tmp1, tmp2, tmp3, tmp4, &runtime);
// Compare flat ASCII strings. Returns when done.
StringCompareStub::GenerateFlatAsciiStringEquals(
masm, left, right, tmp1, tmp2, tmp3);
if (equality) {
StringCompareStub::GenerateFlatAsciiStringEquals(
masm, left, right, tmp1, tmp2, tmp3);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(
masm, left, right, tmp1, tmp2, tmp3, tmp4);
}
// Handle more complex cases in runtime.
__ bind(&runtime);
__ Push(left, right);
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
GenerateMiss(masm);
@ -6812,7 +6839,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
// (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
@ -6840,10 +6867,17 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ b(eq, done);
if (i != kInlinedProbes - 1) {
// Load the hole ready for use below:
__ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
// Stop if found the property.
__ cmp(entity_name, Operand(Handle<String>(name)));
__ b(eq, miss);
Label the_hole;
__ cmp(entity_name, tmp);
__ b(eq, &the_hole);
// Check if the entry name is not a symbol.
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
@ -6851,6 +6885,8 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ tst(entity_name, Operand(kIsSymbolMask));
__ b(eq, miss);
__ bind(&the_hole);
// Restore the properties.
__ ldr(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOffset));

deps/v8/src/arm/codegen-arm.cc (13)

@ -37,6 +37,19 @@ namespace internal {
#define __ ACCESS_MASM(masm)
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
case TranscendentalCache::TAN: return &tan;
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
return NULL;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

deps/v8/src/arm/deoptimizer-arm.cc (126)

@ -351,7 +351,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@ -373,16 +372,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
@ -392,7 +388,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
@ -404,7 +399,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// A marker value is used in place of the context.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
@ -415,7 +409,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@ -425,7 +418,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Number of incoming arguments.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@ -445,6 +437,119 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = 7 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// Construct stub can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
// Constructor function being invoked by the stub.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
top_address + output_offset, output_offset, value);
}
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
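Reading the stores above from highest to lowest address, the synthesized construct stub frame has the following shape (a sketch reconstructed from this function, not code from the commit; kPointerSize is 4 on ARM):

```cpp
// Seven fixed slots follow the translated parameters, highest address first.
//   [ parameter 0 .. parameter height-1 ]    <- translated incoming arguments
//   [ caller's pc ]
//   [ caller's fp ]                          <- output_frame->SetFp() points here
//   [ context (copied from the previous frame) ]
//   [ Smi::FromInt(StackFrame::CONSTRUCT) ]  <- marker used in place of a function
//   [ Smi::FromInt(height - 1) ]             <- argc
//   [ constructor function ]
//   [ allocated receiver ]                   <- frame top (lowest address)
```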
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@ -557,9 +662,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
}
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(cp.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);

deps/v8/src/arm/full-codegen-arm.cc (69)

@ -1004,6 +1004,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
RecordTypeFeedbackCell(stmt->PrepareId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@ -1488,11 +1498,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ldr(r0, MemOperand(sp));
__ push(r0);
VisitForStackValue(key);
__ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
__ push(r1);
VisitForStackValue(value);
if (property->kind() == ObjectLiteral::Property::GETTER) {
VisitForStackValue(value);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ push(r1);
} else {
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ push(r1);
VisitForStackValue(value);
}
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
@ -2382,6 +2396,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(flags);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
}
@ -2932,6 +2947,50 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label runtime, done;
Register object = r0;
Register result = r0;
Register scratch0 = r9;
Register scratch1 = r1;
#ifdef DEBUG
__ AbortIfSmi(object);
__ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
__ Assert(eq, "Trying to get date field from non-date.");
#endif
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch1, Operand(stamp));
__ ldr(scratch1, MemOperand(scratch1));
__ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
__ cmp(scratch1, scratch0);
__ b(ne, &runtime);
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch1);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
context()->Plug(r0);
}
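The emitted code above implements the new date-field cache: index 0 is the time value itself, indices below JSDate::kFirstUncachedField are served from the object's cached fields as long as its cache stamp matches the isolate-wide stamp, and everything else falls back to the runtime via get_date_field_function. A rough C++ rendering of that policy (the struct and helper names below are illustrative, not V8's actual JSDate layout):

```cpp
// Illustrative only: the policy the emitted code follows for %_DateField.
struct DateSketch {
  double value;        // field 0: the primitive time value
  int    cache_stamp;  // compared against the isolate-wide date cache stamp
  int    fields[16];   // cached components (year, month, day, ...)
};

double GetDateFieldRuntime(const DateSketch& date, int index);  // hypothetical slow path

double LoadDateField(const DateSketch& date, int index,
                     int isolate_stamp, int first_uncached_field) {
  if (index == 0) return date.value;                   // always available
  if (index < first_uncached_field && date.cache_stamp == isolate_stamp) {
    return date.fields[index];                         // cached fast path
  }
  return GetDateFieldRuntime(date, index);             // JSDate::GetField in C++
}
```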
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();

deps/v8/src/arm/ic-arm.cc (8)

@ -399,7 +399,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5);
masm, flags, r1, r2, r3, r4, r5, r6);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@ -438,7 +438,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5);
masm, flags, r1, r2, r3, r4, r5, r6);
__ bind(&miss);
}
@ -706,7 +706,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags =
Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5);
masm, flags, r0, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -1516,7 +1516,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r1, r2, r3, r4, r5);
masm, flags, r1, r2, r3, r4, r5, r6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);

deps/v8/src/arm/lithium-arm.cc (443)

File diff suppressed because it is too large

deps/v8/src/arm/lithium-arm.h (55)

@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@ -176,8 +177,8 @@ class LCodeGen;
V(ForInPrepareMap) \
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex)
V(LoadFieldByIndex) \
V(DateField)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@ -989,6 +990,41 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
class LDateField: public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
inputs_[0] = date;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
Smi* index() const { return index_; }
private:
Smi* index_;
};
class LSetDateField: public LTemplateInstruction<1, 2, 1> {
public:
LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index)
: index_(index) {
inputs_[0] = date;
inputs_[1] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field")
DECLARE_HYDROGEN_ACCESSOR(DateField)
int index() const { return index_; }
private:
int index_;
};
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@ -1922,6 +1958,18 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
public:
LAllocateObject(LOperand* temp1, LOperand* temp2) {
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@ -2192,6 +2240,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
zone_(graph->isolate()->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@ -2221,6 +2270,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@ -2325,6 +2375,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;

deps/v8/src/arm/lithium-codegen-arm.cc (192)

@ -62,7 +62,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
HPhase phase("Code generation", chunk());
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope1(VFP3);
@ -479,10 +479,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
if (environment->is_arguments_adaptor()) {
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
} else {
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
default:
UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@ -619,7 +627,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (!e->is_arguments_adaptor()) {
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
@ -1430,6 +1438,46 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
}
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(result));
ASSERT(object.is(r0));
ASSERT(!scratch.is(scratch0()));
ASSERT(!scratch.is(object));
#ifdef DEBUG
__ AbortIfSmi(object);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
__ Assert(eq, "Trying to get date field from non-date.");
#endif
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand(stamp));
__ ldr(scratch, MemOperand(scratch));
__ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
__ cmp(scratch, scratch0());
__ b(ne, &runtime);
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
__ mov(r1, Operand(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
}
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@ -3222,15 +3270,62 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LRandom* instr_;
};
DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(d7));
ASSERT(ToRegister(instr->InputAt(0)).is(r0));
__ PrepareCallCFunction(1, scratch0());
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
// r2: FixedArray of the global context's random seeds
// Load state[0].
__ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
__ cmp(r1, Operand(0));
__ b(eq, deferred->entry());
// Load state[1].
__ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
// r1: state[0].
// r0: state[1].
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
__ and_(r3, r1, Operand(0xFFFF));
__ mov(r4, Operand(18273));
__ mul(r3, r3, r4);
__ add(r1, r3, Operand(r1, LSR, 16));
// Save state[0].
__ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
__ and_(r3, r0, Operand(0xFFFF));
__ mov(r4, Operand(36969));
__ mul(r3, r3, r4);
__ add(r0, r3, Operand(r0, LSR, 16));
// Save state[1].
__ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
__ and_(r0, r0, Operand(0x3FFFF));
__ add(r0, r0, Operand(r1, LSL, 14));
__ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
@ -3245,6 +3340,13 @@ void LCodeGen::DoRandom(LRandom* instr) {
}
void LCodeGen::DoDeferredRandom(LRandom* instr) {
__ PrepareCallCFunction(1, scratch0());
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Return value is in r0.
}
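The inline fast path above is a pair of 16-bit multiply-with-carry generators whose seeds live in the global context's random-seed ByteArray; only when state[0] is still zero does the code defer to the C++ random_uint32 function to seed it. The same arithmetic written out in plain C++ (seed storage is simplified for the sketch):

```cpp
#include <cstdint>

// The update the ARM fast path performs on the two 32-bit seed words.
uint32_t NextRandomBits(uint32_t state[2]) {
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16);
  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16);
  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  return (state[0] << 14) + (state[1] & 0x3FFFF);
}
```

The code that follows then packs these 32 bits into the low half of a double (0x41300000 being the top half of 1.0 x 2^20, per the comment above) to produce the final random value.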
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@ -4322,6 +4424,80 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
class DeferredAllocateObject: public LDeferredCode {
public:
DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocateObject* instr_;
};
DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
Register scratch2 = ToRegister(instr->TempAt(1));
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
ASSERT(initial_map->pre_allocated_property_fields() +
initial_map->unused_property_fields() -
initial_map->inobject_properties() == 0);
// Allocate memory for the object. The initial map might change when
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
__ AllocateInNewSpace(instance_size,
result,
scratch,
scratch2,
deferred->entry(),
TAG_OBJECT);
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
__ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
// Initialize map and fields of the newly allocated object.
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
__ str(map, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
__ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
if (initial_map->inobject_properties() != 0) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < initial_map->inobject_properties(); i++) {
int property_offset = JSObject::kHeaderSize + i * kPointerSize;
__ str(scratch, FieldMemOperand(result, property_offset));
}
}
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ mov(result, Operand(0));
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ LoadHeapObject(r0, constructor);
__ push(r0);
CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
__ StoreToSafepointRegisterSlot(r0, result);
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Heap* heap = isolate()->heap();
ElementsKind boilerplate_elements_kind =

deps/v8/src/arm/lithium-codegen-arm.h (2)

@ -114,8 +114,10 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);

deps/v8/src/arm/regexp-macro-assembler-arm.cc (2)

@ -1055,7 +1055,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
MaybeObject* result = Execution::HandleStackGuardInterrupt();
MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();

deps/v8/src/arm/stub-cache-arm.cc (100)

@ -43,59 +43,83 @@ static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register receiver,
Register name,
// Number of the cache entry, not scaled.
Register offset,
int offset_shift_bits,
Register scratch,
Register scratch2) {
Register scratch2,
Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
ASSERT(map_off_addr > key_off_addr);
ASSERT((map_off_addr - key_off_addr) % 4 == 0);
ASSERT((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
Register offsets_base_addr = scratch;
Register base_addr = scratch;
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ add(offset_scratch, offset, Operand(offset, LSL, 1));
// Calculate the base address of the entry.
__ mov(base_addr, Operand(key_offset));
__ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
// Check that the key in the entry matches the name.
__ mov(offsets_base_addr, Operand(key_offset));
__ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
__ ldr(ip, MemOperand(base_addr, 0));
__ cmp(name, ip);
__ b(ne, &miss);
// Check the map matches.
__ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
__ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ cmp(ip, scratch2);
__ b(ne, &miss);
// Get the code entry from the cache.
__ add(offsets_base_addr, offsets_base_addr,
Operand(value_off_addr - key_off_addr));
__ ldr(scratch2,
MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
Register code = scratch2;
scratch2 = no_reg;
__ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
__ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
Register flags_reg = base_addr;
base_addr = no_reg;
__ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
// It's a nice optimization if this constant is encodable in the bic insn.
uint32_t mask = Code::kFlagsNotUsedInLookup;
ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
__ bic(scratch2, scratch2, Operand(mask));
__ bic(flags_reg, flags_reg, Operand(mask));
// Using cmn and the negative instead of cmp means we can use movw.
if (flags < 0) {
__ cmn(scratch2, Operand(-flags));
__ cmn(flags_reg, Operand(-flags));
} else {
__ cmp(scratch2, Operand(flags));
__ cmp(flags_reg, Operand(flags));
}
__ b(ne, &miss);
// Re-load code entry from cache.
__ ldr(offset,
MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(offset);
__ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
// Miss: fall through.
__ bind(&miss);
@ -167,13 +191,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
Register extra2) {
Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The shifting code relies on the
// entry size being 8.
ASSERT(sizeof(Entry) == 8);
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
ASSERT(sizeof(Entry) == 12);
// Make sure the flags does not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@ -193,6 +218,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
ASSERT(!extra3.is(no_reg));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
extra2, extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
@ -201,29 +231,32 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize;
uint32_t mask = kPrimaryTableSize - 1;
// We shift out the last two bits because they are not part of the hash and
// they are always 01 for maps.
__ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
// Mask down the eor argument to the minimum to keep the immediate
// ARM-encodable.
__ eor(scratch, scratch, Operand(flags & mask));
__ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
// Prefer and_ to ubfx here because ubfx takes 2 cycles.
__ and_(scratch, scratch, Operand(mask));
__ mov(scratch, Operand(scratch, LSR, 1));
// Probe the primary table.
ProbeTable(isolate,
masm,
flags,
kPrimary,
receiver,
name,
scratch,
1,
extra,
extra2);
extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ sub(scratch, scratch, Operand(name, LSR, 1));
uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1);
__ add(scratch, scratch, Operand((flags >> 1) & mask2));
__ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
uint32_t mask2 = kSecondaryTableSize - 1;
__ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
__ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
@ -231,15 +264,18 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
masm,
flags,
kSecondary,
receiver,
name,
scratch,
1,
extra,
extra2);
extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
extra2, extra3);
}
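The rewritten probe reflects the new three-word stub cache entries: the assert on sizeof(Entry) changes from 8 to 12, indices are scaled by 3 instead of shifted, and the receiver's map is now compared against the entry's map word. A sketch of the layout and indexing this implies (the names below are illustrative, not the commit's code):

```cpp
// Each stub cache entry now carries the map as a third word, so an index
// into the table is scaled by 3 pointers rather than shifted by 1.
struct Entry {        // sizeof(Entry) == 12 on 32-bit ARM
  void* key;          // the property name
  void* value;        // the cached Code stub
  void* map;          // the receiver map the stub was compiled for
};

inline Entry* EntryAt(Entry* table, int index) {
  return table + index;   // i.e. base + index * 3 * kPointerSize bytes
}
```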

deps/v8/src/assembler.cc (11)

@ -813,6 +813,17 @@ ExternalReference ExternalReference::random_uint32_function(
}
ExternalReference ExternalReference::get_date_field_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
ExternalReference ExternalReference::transcendental_cache_array_address(
Isolate* isolate) {
return ExternalReference(

deps/v8/src/assembler.h (3)

@ -595,6 +595,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference transcendental_cache_array_address(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);

deps/v8/src/ast.cc (131)

@ -76,7 +76,8 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
is_this_(var->is_this()),
is_trivial_(false),
is_lvalue_(false),
position_(RelocInfo::kNoPosition) {
position_(RelocInfo::kNoPosition),
interface_(var->interface()) {
BindTo(var);
}
@ -84,14 +85,16 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
VariableProxy::VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
int position)
int position,
Interface* interface)
: Expression(isolate),
name_(name),
var_(NULL),
is_this_(is_this),
is_trivial_(false),
is_lvalue_(false),
position_(position) {
position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
}
@ -168,12 +171,15 @@ LanguageMode FunctionLiteral::language_mode() const {
}
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
ObjectLiteral::Property::Property(Literal* key,
Expression* value,
Isolate* isolate) {
emit_store_ = true;
key_ = key;
value_ = value;
Object* k = *key->handle();
if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
if (k->IsSymbol() &&
isolate->heap()->Proto_symbol()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@ -237,55 +243,21 @@ bool IsEqualNumber(void* first, void* second) {
void ObjectLiteral::CalculateEmitStore() {
ZoneHashMap properties(&IsEqualString);
ZoneHashMap elements(&IsEqualNumber);
for (int i = this->properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = this->properties()->at(i);
ZoneHashMap table(Literal::Match);
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
Literal* literal = property->key();
Handle<Object> handle = literal->handle();
if (handle->IsNull()) {
continue;
}
uint32_t hash;
ZoneHashMap* table;
void* key;
Factory* factory = Isolate::Current()->factory();
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
Handle<Object> key_handle = factory->NewNumberFromUint(hash);
key = key_handle.location();
table = &elements;
} else {
key = name.location();
hash = name->Hash();
table = &properties;
}
} else if (handle->ToArrayIndex(&hash)) {
key = handle.location();
table = &elements;
} else {
ASSERT(handle->IsNumber());
double num = handle->Number();
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
table = &properties;
}
if (literal->handle()->IsNull()) continue;
uint32_t hash = literal->Hash();
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED) {
if (table->Lookup(key, hash, false) != NULL) {
property->set_emit_store(false);
}
if (property->kind() == ObjectLiteral::Property::COMPUTED &&
table.Lookup(literal, hash, false) != NULL) {
property->set_emit_store(false);
} else {
// Add key to the table.
table.Lookup(literal, hash, true);
}
// Add key to the table.
table->Lookup(key, hash, true);
}
}
@ -417,8 +389,8 @@ bool Declaration::IsInlineable() const {
return proxy()->var()->IsStackAllocated();
}
bool VariableDeclaration::IsInlineable() const {
return Declaration::IsInlineable() && fun() == NULL;
bool FunctionDeclaration::IsInlineable() const {
return false;
}
@ -517,13 +489,27 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
LookupResult lookup(type->GetIsolate());
while (true) {
type->LookupInDescriptors(NULL, *name, &lookup);
// For properties we know the target iff we have a constant function.
if (lookup.IsFound() && lookup.IsProperty()) {
if (lookup.type() == CONSTANT_FUNCTION) {
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
return true;
if (lookup.IsFound()) {
switch (lookup.type()) {
case CONSTANT_FUNCTION:
// We surely know the target for a constant function.
target_ =
Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
return true;
case NORMAL:
case FIELD:
case CALLBACKS:
case HANDLER:
case INTERCEPTOR:
// We don't know the target.
return false;
case MAP_TRANSITION:
case ELEMENTS_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
// Perhaps something interesting is up in the prototype chain...
break;
}
return false;
}
// If we reach the end of the prototype chain, we don't know the target.
if (!type->prototype()->IsJSObject()) return false;
@ -596,6 +582,14 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
}
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
}
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->CompareType(this);
if (info.IsSmi()) {
@ -995,7 +989,10 @@ CaseClause::CaseClause(Isolate* isolate,
}
INCREASE_NODE_COUNT(VariableDeclaration)
INCREASE_NODE_COUNT(FunctionDeclaration)
INCREASE_NODE_COUNT(ModuleDeclaration)
INCREASE_NODE_COUNT(ImportDeclaration)
INCREASE_NODE_COUNT(ExportDeclaration)
INCREASE_NODE_COUNT(ModuleLiteral)
INCREASE_NODE_COUNT(ModuleVariable)
INCREASE_NODE_COUNT(ModulePath)
@ -1137,4 +1134,22 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
}
}
Handle<String> Literal::ToString() {
if (handle_->IsString()) return Handle<String>::cast(handle_);
ASSERT(handle_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str;
if (handle_->IsSmi()) {
// Optimization only, the heap number case would subsume this.
OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
str = arr;
} else {
str = DoubleToCString(handle_->Number(), buffer);
}
return FACTORY->NewStringFromAscii(CStrVector(str));
}
} } // namespace v8::internal

188
deps/v8/src/ast.h

@ -41,6 +41,7 @@
#include "token.h"
#include "utils.h"
#include "variables.h"
#include "interface.h"
#include "zone-inl.h"
namespace v8 {
@ -61,7 +62,10 @@ namespace internal {
#define DECLARATION_NODE_LIST(V) \
V(VariableDeclaration) \
V(FunctionDeclaration) \
V(ModuleDeclaration) \
V(ImportDeclaration) \
V(ExportDeclaration) \
#define MODULE_NODE_LIST(V) \
V(ModuleLiteral) \
@ -444,10 +448,10 @@ class Declaration: public AstNode {
VariableProxy* proxy() const { return proxy_; }
VariableMode mode() const { return mode_; }
Scope* scope() const { return scope_; }
virtual InitializationFlag initialization() const = 0;
virtual bool IsInlineable() const;
virtual Declaration* AsDeclaration() { return this; }
virtual VariableDeclaration* AsVariableDeclaration() { return NULL; }
protected:
Declaration(VariableProxy* proxy,
@ -475,22 +479,43 @@ class VariableDeclaration: public Declaration {
public:
DECLARE_NODE_TYPE(VariableDeclaration)
virtual VariableDeclaration* AsVariableDeclaration() { return this; }
virtual InitializationFlag initialization() const {
return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
}
protected:
template<class> friend class AstNodeFactory;
VariableDeclaration(VariableProxy* proxy,
VariableMode mode,
Scope* scope)
: Declaration(proxy, mode, scope) {
}
};
FunctionLiteral* fun() const { return fun_; } // may be NULL
class FunctionDeclaration: public Declaration {
public:
DECLARE_NODE_TYPE(FunctionDeclaration)
FunctionLiteral* fun() const { return fun_; }
virtual InitializationFlag initialization() const {
return kCreatedInitialized;
}
virtual bool IsInlineable() const;
protected:
template<class> friend class AstNodeFactory;
VariableDeclaration(VariableProxy* proxy,
FunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
Scope* scope)
: Declaration(proxy, mode, scope),
fun_(fun) {
// At the moment there are no "const functions"'s in JavaScript...
ASSERT(fun == NULL || mode == VAR || mode == LET);
// At the moment there are no "const functions" in JavaScript...
ASSERT(mode == VAR || mode == LET);
ASSERT(fun != NULL);
}
private:
@ -503,6 +528,9 @@ class ModuleDeclaration: public Declaration {
DECLARE_NODE_TYPE(ModuleDeclaration)
Module* module() const { return module_; }
virtual InitializationFlag initialization() const {
return kCreatedInitialized;
}
protected:
template<class> friend class AstNodeFactory;
@ -519,10 +547,58 @@ class ModuleDeclaration: public Declaration {
};
class ImportDeclaration: public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
Module* module() const { return module_; }
virtual InitializationFlag initialization() const {
return kCreatedInitialized;
}
protected:
template<class> friend class AstNodeFactory;
ImportDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
: Declaration(proxy, LET, scope),
module_(module) {
}
private:
Module* module_;
};
class ExportDeclaration: public Declaration {
public:
DECLARE_NODE_TYPE(ExportDeclaration)
virtual InitializationFlag initialization() const {
return kCreatedInitialized;
}
protected:
template<class> friend class AstNodeFactory;
ExportDeclaration(VariableProxy* proxy,
Scope* scope)
: Declaration(proxy, LET, scope) {
}
};
class Module: public AstNode {
// TODO(rossberg): stuff to come...
public:
Interface* interface() const { return interface_; }
protected:
Module() {}
Module() : interface_(Interface::NewModule()) {}
explicit Module(Interface* interface) : interface_(interface) {}
private:
Interface* interface_;
};
@ -535,8 +611,9 @@ class ModuleLiteral: public Module {
protected:
template<class> friend class AstNodeFactory;
explicit ModuleLiteral(Block* body)
: body_(body) {
ModuleLiteral(Block* body, Interface* interface)
: Module(interface),
body_(body) {
}
private:
@ -553,9 +630,7 @@ class ModuleVariable: public Module {
protected:
template<class> friend class AstNodeFactory;
explicit ModuleVariable(VariableProxy* proxy)
: proxy_(proxy) {
}
inline explicit ModuleVariable(VariableProxy* proxy);
private:
VariableProxy* proxy_;
@ -1136,11 +1211,6 @@ class Literal: public Expression {
public:
DECLARE_NODE_TYPE(Literal)
// Check if this literal is identical to the other literal.
bool IsIdenticalTo(const Literal* other) const {
return handle_.is_identical_to(other->handle_);
}
virtual bool IsPropertyName() {
if (handle_->IsSymbol()) {
uint32_t ignored;
@ -1173,6 +1243,16 @@ class Literal: public Expression {
Handle<Object> handle() const { return handle_; }
// Support for using Literal as a HashMap key. NOTE: Currently, this works
// only for string and number literals!
uint32_t Hash() { return ToString()->Hash(); }
static bool Match(void* literal1, void* literal2) {
Handle<String> s1 = static_cast<Literal*>(literal1)->ToString();
Handle<String> s2 = static_cast<Literal*>(literal2)->ToString();
return s1->Equals(*s2);
}
protected:
template<class> friend class AstNodeFactory;
@ -1181,6 +1261,8 @@ class Literal: public Expression {
handle_(handle) { }
private:
Handle<String> ToString();
Handle<Object> handle_;
};
@ -1232,7 +1314,7 @@ class ObjectLiteral: public MaterializedLiteral {
PROTOTYPE // Property is __proto__.
};
Property(Literal* key, Expression* value);
Property(Literal* key, Expression* value, Isolate* isolate);
Literal* key() { return key_; }
Expression* value() { return value_; }
@ -1382,6 +1464,8 @@ class VariableProxy: public Expression {
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
int position() const { return position_; }
Interface* interface() const { return interface_; }
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
@ -1397,7 +1481,8 @@ class VariableProxy: public Expression {
VariableProxy(Isolate* isolate,
Handle<String> name,
bool is_this,
int position);
int position,
Interface* interface);
Handle<String> name_;
Variable* var_; // resolved variable, or NULL
@ -1407,6 +1492,7 @@ class VariableProxy: public Expression {
// or with a increment/decrement operator.
bool is_lvalue_;
int position_;
Interface* interface_;
};
@ -1528,6 +1614,13 @@ class CallNew: public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
virtual int position() const { return pos_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() { return target_; }
// Bailout support.
int ReturnId() const { return return_id_; }
protected:
template<class> friend class AstNodeFactory;
@ -1538,12 +1631,19 @@ class CallNew: public Expression {
: Expression(isolate),
expression_(expression),
arguments_(arguments),
pos_(pos) { }
pos_(pos),
is_monomorphic_(false),
return_id_(GetNextId(isolate)) { }
private:
Expression* expression_;
ZoneList<Expression*>* arguments_;
int pos_;
bool is_monomorphic_;
Handle<JSFunction> target_;
int return_id_;
};
@ -2422,6 +2522,15 @@ class RegExpEmpty: public RegExpTree {
};
// ----------------------------------------------------------------------------
// Out-of-line inline constructors (to side-step cyclic dependencies).
inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
: Module(proxy->interface()),
proxy_(proxy) {
}
// ----------------------------------------------------------------------------
// Basic visitor
// - leaf node visitors are abstract.
@ -2518,13 +2627,21 @@ class AstNodeFactory BASE_EMBEDDED {
VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
Scope* scope) {
VariableDeclaration* decl =
new(zone_) VariableDeclaration(proxy, mode, fun, scope);
new(zone_) VariableDeclaration(proxy, mode, scope);
VISIT_AND_RETURN(VariableDeclaration, decl)
}
FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* fun,
Scope* scope) {
FunctionDeclaration* decl =
new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
VISIT_AND_RETURN(FunctionDeclaration, decl)
}
ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope) {
@ -2533,8 +2650,23 @@ class AstNodeFactory BASE_EMBEDDED {
VISIT_AND_RETURN(ModuleDeclaration, decl)
}
ModuleLiteral* NewModuleLiteral(Block* body) {
ModuleLiteral* module = new(zone_) ModuleLiteral(body);
ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope) {
ImportDeclaration* decl =
new(zone_) ImportDeclaration(proxy, module, scope);
VISIT_AND_RETURN(ImportDeclaration, decl)
}
ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
Scope* scope) {
ExportDeclaration* decl =
new(zone_) ExportDeclaration(proxy, scope);
VISIT_AND_RETURN(ExportDeclaration, decl)
}
ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
VISIT_AND_RETURN(ModuleLiteral, module)
}
@ -2690,9 +2822,11 @@ class AstNodeFactory BASE_EMBEDDED {
VariableProxy* NewVariableProxy(Handle<String> name,
bool is_this,
int position = RelocInfo::kNoPosition) {
int position = RelocInfo::kNoPosition,
Interface* interface =
Interface::NewValue()) {
VariableProxy* proxy =
new(zone_) VariableProxy(isolate_, name, is_this, position);
new(zone_) VariableProxy(isolate_, name, is_this, position, interface);
VISIT_AND_RETURN(VariableProxy, proxy)
}
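Note: the new trailing Interface* parameter on NewVariableProxy defaults to Interface::NewValue(), so existing call sites compile unchanged while module-aware parser code can thread an explicit interface through. A hedged illustration of the two call shapes (the factory() accessor and the surrounding parser code are hypothetical, not part of this commit):

// Ordinary variable reference: picks up the default value interface.
VariableProxy* value_ref = factory()->NewVariableProxy(name, false, pos);

// Module-typed reference: supply an explicit interface instead.
VariableProxy* module_ref =
    factory()->NewVariableProxy(name, false, pos, Interface::NewModule());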

2
deps/v8/src/bootstrapper.cc

@ -927,7 +927,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // --- D a t e ---
// Builtin functions for Date.prototype.
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
isolate->initial_object_prototype(),
Builtins::kIllegal, true);

56
deps/v8/src/builtins.cc

@ -310,28 +310,6 @@ BUILTIN(ArrayCodeGeneric) {
}
static void CopyElements(Heap* heap,
AssertNoAllocation* no_gc,
FixedArray* dst,
int dst_index,
FixedArray* src,
int src_index,
int len) {
if (len == 0) return;
ASSERT(dst != src); // Use MoveElements instead.
ASSERT(dst->map() != HEAP->fixed_cow_array_map());
ASSERT(len > 0);
CopyWords(dst->data_start() + dst_index,
src->data_start() + src_index,
len);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
}
heap->incremental_marking()->RecordWrites(dst);
}
static void MoveElements(Heap* heap,
AssertNoAllocation* no_gc,
FixedArray* dst,
@ -531,7 +509,8 @@ BUILTIN(ArrayPush) {
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
@ -667,7 +646,8 @@ BUILTIN(ArrayUnshift) {
}
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
@ -778,8 +758,9 @@ BUILTIN(ArraySlice) {
if (!maybe_array->To(&result_array)) return maybe_array;
AssertNoAllocation no_gc;
CopyElements(heap, &no_gc, FixedArray::cast(result_array->elements()), 0,
elms, k, result_len);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, k,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, result_len);
return result_array;
}
@ -852,11 +833,9 @@ BUILTIN(ArraySplice) {
{
AssertNoAllocation no_gc;
// Fill newly created array.
CopyElements(heap,
&no_gc,
FixedArray::cast(result_array->elements()), 0,
elms, actual_start,
actual_delete_count);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, actual_start,
FixedArray::cast(result_array->elements()),
FAST_ELEMENTS, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
@ -906,12 +885,13 @@ BUILTIN(ArraySplice) {
{
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
new_elms, FAST_ELEMENTS, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyElements(heap, &no_gc,
new_elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
to_copy);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS,
actual_start + actual_delete_count,
new_elms, FAST_ELEMENTS,
actual_start + item_count, to_copy);
}
FillWithHoles(heap, new_elms, new_length, capacity);
@ -1000,7 +980,9 @@ BUILTIN(ArrayConcat) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
FixedArray* elms = FixedArray::cast(array->elements());
CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
CopyObjectToObjectElements(&no_gc, elms, FAST_ELEMENTS, 0,
result_elms, FAST_ELEMENTS,
start_pos, len);
start_pos += len;
}
ASSERT(start_pos == result_len);
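Note: every former CopyElements call site in builtins.cc now goes through CopyObjectToObjectElements, whose definition appears in elements.cc below and which takes the source and destination ElementsKind explicitly. Annotated for reference, the ArrayPush call above reads as follows (parameter meanings taken from that definition; this is commentary, not code added by the commit):

CopyObjectToObjectElements(&no_gc,
                           elms, FAST_ELEMENTS, 0,      // source, its kind, start index
                           new_elms, FAST_ELEMENTS, 0,  // destination, its kind, start index
                           len);                        // number of elements to copy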

9
deps/v8/src/codegen.h

@ -84,6 +84,15 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
namespace v8 {
namespace internal {
// Results of the library implementation of transcendental functions may differ
// from the one we use in our generated code. Therefore we use the same
// generated code both in runtime and compiled code.
typedef double (*TranscendentalFunction)(double x);
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type);
class ElementsTransitionGenerator : public AllStatic {
public:
static void GenerateSmiOnlyToObject(MacroAssembler* masm);
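Note: CreateTranscendentalFunction returns a plain double(double) function pointer to the generated stub, which is what lets the C++ runtime and compiled code share one implementation as the comment above explains. A hedged usage sketch (the TranscendentalCache::LOG enumerator is assumed from the rest of V8 and is not shown in this diff):

// Fetch the generated implementation once, then call it like any C function.
TranscendentalFunction fast_log =
    CreateTranscendentalFunction(TranscendentalCache::LOG);  // enumerator assumed
double y = fast_log(2.0);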

2
deps/v8/src/compiler.cc

@ -116,9 +116,9 @@ void CompilationInfo::DisableOptimization() {
bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_self_optimization &&
FLAG_crankshaft &&
!Serializer::enabled() &&
!function()->flags()->Contains(kDontSelfOptimize) &&
!function()->flags()->Contains(kDontOptimize) &&
function()->scope()->allows_lazy_recompilation() &&
(shared_info().is_null() || !shared_info()->optimization_disabled());
}

3
deps/v8/src/d8.gyp

@ -41,9 +41,6 @@
'include_dirs+': [
'../src',
],
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
],
'sources': [
'd8.cc',
],

384
deps/v8/src/date.cc

@ -0,0 +1,384 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "date.h"
#include "v8.h"
#include "objects.h"
#include "objects-inl.h"
namespace v8 {
namespace internal {
static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
static const int kDaysIn4Years = 4 * 365 + 1;
static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
static const int kDays1970to2000 = 30 * 365 + 7;
static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
kDays1970to2000;
static const int kYearsOffset = 400000;
static const char kDaysInMonths[] =
{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
void DateCache::ResetDateCache() {
static const int kMaxStamp = Smi::kMaxValue;
stamp_ = Smi::FromInt(stamp_->value() + 1);
if (stamp_->value() > kMaxStamp) {
stamp_ = Smi::FromInt(0);
}
ASSERT(stamp_ != Smi::FromInt(kInvalidStamp));
for (int i = 0; i < kDSTSize; ++i) {
ClearSegment(&dst_[i]);
}
dst_usage_counter_ = 0;
before_ = &dst_[0];
after_ = &dst_[1];
local_offset_ms_ = kInvalidLocalOffsetInMs;
ymd_valid_ = false;
}
void DateCache::ClearSegment(DST* segment) {
segment->start_sec = kMaxEpochTimeInSec;
segment->end_sec = -kMaxEpochTimeInSec;
segment->offset_ms = 0;
segment->last_used = 0;
}
void DateCache::YearMonthDayFromDays(
int days, int* year, int* month, int* day) {
if (ymd_valid_) {
// Check conservatively if the given 'days' has
// the same year and month as the cached 'days'.
int new_day = ymd_day_ + (days - ymd_days_);
if (new_day >= 1 && new_day <= 28) {
ymd_day_ = new_day;
ymd_days_ = days;
*year = ymd_year_;
*month = ymd_month_;
*day = new_day;
return;
}
}
int save_days = days;
days += kDaysOffset;
*year = 400 * (days / kDaysIn400Years) - kYearsOffset;
days %= kDaysIn400Years;
ASSERT(DaysFromYearMonth(*year, 0) + days == save_days);
days--;
int yd1 = days / kDaysIn100Years;
days %= kDaysIn100Years;
*year += 100 * yd1;
days++;
int yd2 = days / kDaysIn4Years;
days %= kDaysIn4Years;
*year += 4 * yd2;
days--;
int yd3 = days / 365;
days %= 365;
*year += yd3;
bool is_leap = (!yd1 || yd2) && !yd3;
ASSERT(days >= -1);
ASSERT(is_leap || (days >= 0));
ASSERT((days < 365) || (is_leap && (days < 366)));
ASSERT(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
ASSERT(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days));
ASSERT(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days));
days += is_leap;
// Check if the date is after February.
if (days >= 31 + 28 + is_leap) {
days -= 31 + 28 + is_leap;
// Find the date starting from March.
for (int i = 2; i < 12; i++) {
if (days < kDaysInMonths[i]) {
*month = i;
*day = days + 1;
break;
}
days -= kDaysInMonths[i];
}
} else {
// Check January and February.
if (days < 31) {
*month = 0;
*day = days + 1;
} else {
*month = 1;
*day = days - 31 + 1;
}
}
ASSERT(DaysFromYearMonth(*year, *month) + *day - 1 == save_days);
ymd_valid_ = true;
ymd_year_ = *year;
ymd_month_ = *month;
ymd_day_ = *day;
ymd_days_ = save_days;
}
int DateCache::DaysFromYearMonth(int year, int month) {
static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
181, 212, 243, 273, 304, 334};
static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
182, 213, 244, 274, 305, 335};
year += month / 12;
month %= 12;
if (month < 0) {
year--;
month += 12;
}
ASSERT(month >= 0);
ASSERT(month < 12);
// year_delta is an arbitrary number such that:
// a) year_delta = -1 (mod 400)
// b) year + year_delta > 0 for years in the range defined by
// ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
// Jan 1 1970. This is required so that we don't run into integer
// division of negative numbers.
// c) there shouldn't be an overflow for 32-bit integers in the following
// operations.
static const int year_delta = 399999;
static const int base_day = 365 * (1970 + year_delta) +
(1970 + year_delta) / 4 -
(1970 + year_delta) / 100 +
(1970 + year_delta) / 400;
int year1 = year + year_delta;
int day_from_year = 365 * year1 +
year1 / 4 -
year1 / 100 +
year1 / 400 -
base_day;
if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
return day_from_year + day_from_month[month];
}
return day_from_year + day_from_month_leap[month];
}
void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
if (after_->offset_ms == offset_ms &&
after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
time_sec <= after_->end_sec) {
// Extend the after_ segment.
after_->start_sec = time_sec;
} else {
// The after_ segment is either invalid or starts too late.
if (after_->start_sec <= after_->end_sec) {
// If the after_ segment is valid, replace it with a new segment.
after_ = LeastRecentlyUsedDST(before_);
}
after_->start_sec = time_sec;
after_->end_sec = time_sec;
after_->offset_ms = offset_ms;
after_->last_used = ++dst_usage_counter_;
}
}
int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
int time_sec = (time_ms >= 0 && time_ms <= kMaxEpochTimeInMs)
? static_cast<int>(time_ms / 1000)
: static_cast<int>(EquivalentTime(time_ms) / 1000);
// Invalidate cache if the usage counter is close to overflow.
// Note that dst_usage_counter is incremented less than ten times
// in this function.
if (dst_usage_counter_ >= kMaxInt - 10) {
dst_usage_counter_ = 0;
for (int i = 0; i < kDSTSize; ++i) {
ClearSegment(&dst_[i]);
}
}
// Optimistic fast check.
if (before_->start_sec <= time_sec &&
time_sec <= before_->end_sec) {
// Cache hit.
before_->last_used = ++dst_usage_counter_;
return before_->offset_ms;
}
ProbeDST(time_sec);
ASSERT(InvalidSegment(before_) || before_->start_sec <= time_sec);
ASSERT(InvalidSegment(after_) || time_sec < after_->start_sec);
if (InvalidSegment(before_)) {
// Cache miss.
before_->start_sec = time_sec;
before_->end_sec = time_sec;
before_->offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
before_->last_used = ++dst_usage_counter_;
return before_->offset_ms;
}
if (time_sec <= before_->end_sec) {
// Cache hit.
before_->last_used = ++dst_usage_counter_;
return before_->offset_ms;
}
if (time_sec > before_->end_sec + kDefaultDSTDeltaInSec) {
// If the before_ segment ends too early, then just
// query for the offset of the time_sec
int offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
ExtendTheAfterSegment(time_sec, offset_ms);
// This swap helps the optimistic fast check in subsequent invocations.
DST* temp = before_;
before_ = after_;
after_ = temp;
return offset_ms;
}
// Now the time_sec is between
// before_->end_sec and before_->end_sec + default DST delta.
// Update the usage counter of before_ since it is going to be used.
before_->last_used = ++dst_usage_counter_;
// Check if after_ segment is invalid or starts too late.
// Note that start_sec of invalid segments is kMaxEpochTimeInSec.
if (before_->end_sec + kDefaultDSTDeltaInSec <= after_->start_sec) {
int new_after_start_sec = before_->end_sec + kDefaultDSTDeltaInSec;
int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec);
ExtendTheAfterSegment(new_after_start_sec, new_offset_ms);
} else {
ASSERT(!InvalidSegment(after_));
// Update the usage counter of after_ since it is going to be used.
after_->last_used = ++dst_usage_counter_;
}
// Now the time_sec is between before_->end_sec and after_->start_sec.
// Only one daylight savings offset change can occur in this interval.
if (before_->offset_ms == after_->offset_ms) {
// Merge two segments if they have the same offset.
before_->end_sec = after_->end_sec;
ClearSegment(after_);
return before_->offset_ms;
}
// Binary search for daylight savings offset change point,
// but give up if we don't find it in four iterations.
for (int i = 4; i >= 0; --i) {
int delta = after_->start_sec - before_->end_sec;
int middle_sec = (i == 0) ? time_sec : before_->end_sec + delta / 2;
int offset_ms = GetDaylightSavingsOffsetFromOS(middle_sec);
if (before_->offset_ms == offset_ms) {
before_->end_sec = middle_sec;
if (time_sec <= before_->end_sec) {
return offset_ms;
}
} else {
ASSERT(after_->offset_ms == offset_ms);
after_->start_sec = middle_sec;
if (time_sec >= after_->start_sec) {
// This swap helps the optimistic fast check in subsequent invocations.
DST* temp = before_;
before_ = after_;
after_ = temp;
return offset_ms;
}
}
}
UNREACHABLE();
return 0;
}
void DateCache::ProbeDST(int time_sec) {
DST* before = NULL;
DST* after = NULL;
ASSERT(before_ != after_);
for (int i = 0; i < kDSTSize; ++i) {
if (dst_[i].start_sec <= time_sec) {
if (before == NULL || before->start_sec < dst_[i].start_sec) {
before = &dst_[i];
}
} else if (time_sec < dst_[i].end_sec) {
if (after == NULL || after->end_sec > dst_[i].end_sec) {
after = &dst_[i];
}
}
}
// If before or after segments were not found,
// then set them to any invalid segment.
if (before == NULL) {
before = InvalidSegment(before_) ? before_ : LeastRecentlyUsedDST(after);
}
if (after == NULL) {
after = InvalidSegment(after_) && before != after_
? after_ : LeastRecentlyUsedDST(before);
}
ASSERT(before != NULL);
ASSERT(after != NULL);
ASSERT(before != after);
ASSERT(InvalidSegment(before) || before->start_sec <= time_sec);
ASSERT(InvalidSegment(after) || time_sec < after->start_sec);
ASSERT(InvalidSegment(before) || InvalidSegment(after) ||
before->end_sec < after->start_sec);
before_ = before;
after_ = after;
}
DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
DST* result = NULL;
for (int i = 0; i < kDSTSize; ++i) {
if (&dst_[i] == skip) continue;
if (result == NULL || result->last_used > dst_[i].last_used) {
result = &dst_[i];
}
}
ClearSegment(result);
return result;
}
} } // namespace v8::internal
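Note: DateCache keeps up to kDSTSize segments [start_sec, end_sec] over which the DST offset is constant, answers most queries from the before_ segment, and otherwise narrows the unknown transition point by asking the OS at midpoints at most five times. A much-reduced standalone sketch of that boundary search, assuming a single pair of adjacent segments and an os_offset callback standing in for GetDaylightSavingsOffsetFromOS:

#include <functional>

struct Segment { int start_sec; int end_sec; int offset_ms; };

// Move the boundary between two adjacent segments toward time_sec by
// querying the OS at midpoints, mirroring the loop at the end of
// DateCache::DaylightSavingsOffsetInMs above (the LRU and swap
// bookkeeping of the real cache is omitted).
int OffsetBetween(Segment* before, Segment* after, int time_sec,
                  const std::function<int(int)>& os_offset) {
  for (int i = 4; i >= 0; --i) {
    int delta = after->start_sec - before->end_sec;
    int middle = (i == 0) ? time_sec : before->end_sec + delta / 2;
    int offset = os_offset(middle);
    if (offset == before->offset_ms) {
      before->end_sec = middle;
      if (time_sec <= before->end_sec) return offset;
    } else {
      after->start_sec = middle;
      if (time_sec >= after->start_sec) return offset;
    }
  }
  return before->offset_ms;  // not reached when there is one transition
}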

260
deps/v8/src/date.h

@ -0,0 +1,260 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DATE_H_
#define V8_DATE_H_
#include "allocation.h"
#include "globals.h"
#include "platform.h"
namespace v8 {
namespace internal {
class DateCache {
public:
static const int kMsPerMin = 60 * 1000;
static const int kSecPerDay = 24 * 60 * 60;
static const int64_t kMsPerDay = kSecPerDay * 1000;
// The largest time that can be passed to OS date-time library functions.
static const int kMaxEpochTimeInSec = kMaxInt;
static const int64_t kMaxEpochTimeInMs =
static_cast<int64_t>(kMaxInt) * 1000;
// The largest time that can be stored in JSDate.
static const int64_t kMaxTimeInMs =
static_cast<int64_t>(864000000) * 10000000;
// Conservative upper bound on time that can be stored in JSDate
// before UTC conversion.
static const int64_t kMaxTimeBeforeUTCInMs =
kMaxTimeInMs + 10 * kMsPerDay;
// Sentinel that denotes an invalid local offset.
static const int kInvalidLocalOffsetInMs = kMaxInt;
// Sentinel that denotes an invalid cache stamp.
// It is an invariant of DateCache that cache stamp is non-negative.
static const int kInvalidStamp = -1;
DateCache() : stamp_(0) {
ResetDateCache();
}
virtual ~DateCache() {}
// Clears cached timezone information and increments the cache stamp.
void ResetDateCache();
// Computes floor(time_ms / kMsPerDay).
static int DaysFromTime(int64_t time_ms) {
if (time_ms < 0) time_ms -= (kMsPerDay - 1);
return static_cast<int>(time_ms / kMsPerDay);
}
// Computes modulo(time_ms, kMsPerDay) given that
// days = floor(time_ms / kMsPerDay).
static int TimeInDay(int64_t time_ms, int days) {
return static_cast<int>(time_ms - days * kMsPerDay);
}
// Given the number of days since the epoch, computes the weekday.
// ECMA 262 - 15.9.1.6.
int Weekday(int days) {
int result = (days + 4) % 7;
return result >= 0 ? result : result + 7;
}
bool IsLeap(int year) {
return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}
// ECMA 262 - 15.9.1.7.
int LocalOffsetInMs() {
if (local_offset_ms_ == kInvalidLocalOffsetInMs) {
local_offset_ms_ = GetLocalOffsetFromOS();
}
return local_offset_ms_;
}
const char* LocalTimezone(int64_t time_ms) {
if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
time_ms = EquivalentTime(time_ms);
}
return OS::LocalTimezone(static_cast<double>(time_ms));
}
// ECMA 262 - 15.9.5.26
int TimezoneOffset(int64_t time_ms) {
int64_t local_ms = ToLocal(time_ms);
return static_cast<int>((time_ms - local_ms) / kMsPerMin);
}
// ECMA 262 - 15.9.1.9
int64_t ToLocal(int64_t time_ms) {
return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
}
// ECMA 262 - 15.9.1.9
int64_t ToUTC(int64_t time_ms) {
time_ms -= LocalOffsetInMs();
return time_ms - DaylightSavingsOffsetInMs(time_ms);
}
// Computes a time equivalent to the given time according
// to ECMA 262 - 15.9.1.9.
// The issue here is that some library calls don't work right for dates
// that cannot be represented using a non-negative signed 32 bit integer
// (measured in whole seconds based on the 1970 epoch).
// We solve this by mapping the time to a year with same leap-year-ness
// and same starting day for the year. The ECMAscript specification says
// we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
int64_t EquivalentTime(int64_t time_ms) {
int days = DaysFromTime(time_ms);
int time_within_day_ms = static_cast<int>(time_ms - days * kMsPerDay);
int year, month, day;
YearMonthDayFromDays(days, &year, &month, &day);
int new_days = DaysFromYearMonth(EquivalentYear(year), month) + day - 1;
return static_cast<int64_t>(new_days) * kMsPerDay + time_within_day_ms;
}
// Returns an equivalent year in the range [2008-2035] matching
// - leap year,
// - week day of first day.
// ECMA 262 - 15.9.1.9.
int EquivalentYear(int year) {
int week_day = Weekday(DaysFromYearMonth(year, 0));
int recent_year = (IsLeap(year) ? 1956 : 1967) + (week_day * 12) % 28;
// Find the year in the range 2008..2037 that is equivalent mod 28.
// Add 3*28 to give a positive argument to the modulus operator.
return 2008 + (recent_year + 3 * 28 - 2008) % 28;
}
// Given the number of days since the epoch, computes
// the corresponding year, month, and day.
void YearMonthDayFromDays(int days, int* year, int* month, int* day);
// Computes the number of days since the epoch for
// the first day of the given month in the given year.
int DaysFromYearMonth(int year, int month);
// Cache stamp is used for invalidating caches in JSDate.
// We increment the stamp each time when the timezone information changes.
// JSDate objects perform stamp check and invalidate their caches if
// their saved stamp is not equal to the current stamp.
Smi* stamp() { return stamp_; }
void* stamp_address() { return &stamp_; }
// These functions are virtual so that we can override them when testing.
virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
double time_ms = static_cast<double>(time_sec * 1000);
return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
}
virtual int GetLocalOffsetFromOS() {
double offset = OS::LocalTimeOffset();
ASSERT(offset < kInvalidLocalOffsetInMs);
return static_cast<int>(offset);
}
private:
// The implementation relies on the fact that no time zones have
// more than one daylight savings offset change per 19 days.
// In Egypt in 2010 they decided to suspend DST during Ramadan. This
// led to a short interval where DST is in effect from September 10 to
// September 30.
static const int kDefaultDSTDeltaInSec = 19 * kSecPerDay;
// Size of the Daylight Savings Time cache.
static const int kDSTSize = 32;
// Daylight Savings Time segment stores a segment of time where
// daylight savings offset does not change.
struct DST {
int start_sec;
int end_sec;
int offset_ms;
int last_used;
};
// Computes the daylight savings offset for the given time.
// ECMA 262 - 15.9.1.8
int DaylightSavingsOffsetInMs(int64_t time_ms);
// Sets the before_ and the after_ segments from the DST cache such that
// the before_ segment starts earlier than the given time and
// the after_ segment starts later than the given time.
// Both segments might be invalid.
// The last_used counters of the before_ and after_ are updated.
void ProbeDST(int time_sec);
// Finds the least recently used segment from the DST cache that is not
// equal to the given 'skip' segment.
DST* LeastRecentlyUsedDST(DST* skip);
// Extends the after_ segment with the given point or resets it
// if it starts later than the given time + kDefaultDSTDeltaInSec.
inline void ExtendTheAfterSegment(int time_sec, int offset_ms);
// Makes the given segment invalid.
inline void ClearSegment(DST* segment);
bool InvalidSegment(DST* segment) {
return segment->start_sec > segment->end_sec;
}
Smi* stamp_;
// Daylight Saving Time cache.
DST dst_[kDSTSize];
int dst_usage_counter_;
DST* before_;
DST* after_;
int local_offset_ms_;
// Year/Month/Day cache.
bool ymd_valid_;
int ymd_days_;
int ymd_year_;
int ymd_month_;
int ymd_day_;
};
} } // namespace v8::internal
#endif
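Note: EquivalentYear maps an out-of-range year onto 2008..2035 while preserving leap-year-ness and the weekday of January 1, so OS time functions that only accept a 32-bit epoch range still produce correct month, day, and weekday answers. Worked example: 2100 is not a leap year and January 1, 2100 is a Friday (week_day 5 with Sunday == 0), so recent_year = 1967 + (5 * 12) % 28 = 1971 and the result is 2008 + (1971 + 3 * 28 - 2008) % 28 = 2027, a common year that also starts on a Friday. A standalone restatement of just the mapping, with the leap test and weekday supplied by the caller:

// Maps (is_leap, weekday of January 1 with Sunday == 0) to a year in
// 2008..2035 with the same two properties, as in DateCache::EquivalentYear.
int EquivalentYearFor(bool is_leap, int week_day) {
  int recent_year = (is_leap ? 1956 : 1967) + (week_day * 12) % 28;
  return 2008 + (recent_year + 3 * 28 - 2008) % 28;
}
// EquivalentYearFor(false, 5) == 2027, the stand-in for 2100 used above.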

669
deps/v8/src/date.js

File diff suppressed because it is too large

5
deps/v8/src/debug-debugger.js

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -478,7 +478,8 @@ ScriptBreakPoint.prototype.clear = function () {
function UpdateScriptBreakPoints(script) {
for (var i = 0; i < script_break_points.length; i++) {
var break_point = script_break_points[i];
if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName) &&
if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName ||
break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
break_point.matchesScript(script)) {
break_point.set(script);
}

40
deps/v8/src/deoptimizer.cc

@ -170,8 +170,16 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
deoptimizer->output_[frame_index - 1]->GetFrameType() ==
StackFrame::ARGUMENTS_ADAPTOR;
DeoptimizedFrameInfo* info =
new DeoptimizedFrameInfo(deoptimizer, frame_index, has_arguments_adaptor);
int construct_offset = has_arguments_adaptor ? 2 : 1;
bool has_construct_stub =
frame_index >= construct_offset &&
deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
StackFrame::CONSTRUCT;
DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
frame_index,
has_arguments_adaptor,
has_construct_stub);
isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
// Get the "simulated" top and size for the requested frame.
@ -570,6 +578,9 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::ARGUMENTS_ADAPTOR_FRAME:
DoComputeArgumentsAdaptorFrame(&iterator, i);
break;
case Translation::CONSTRUCT_STUB_FRAME:
DoComputeConstructStubFrame(&iterator, i);
break;
default:
UNREACHABLE();
break;
@ -686,6 +697,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::BEGIN:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@ -873,6 +885,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::BEGIN:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@ -1206,7 +1219,8 @@ FrameDescription::FrameDescription(uint32_t frame_size,
function_(function),
top_(kZapUint32),
pc_(kZapUint32),
fp_(kZapUint32) {
fp_(kZapUint32),
context_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
SetRegister(r, kZapUint32);
@ -1320,6 +1334,13 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray() {
}
void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
buffer_->Add(CONSTRUCT_STUB_FRAME);
buffer_->Add(literal_id);
buffer_->Add(height);
}
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
buffer_->Add(literal_id);
@ -1402,6 +1423,7 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
case CONSTRUCT_STUB_FRAME:
return 2;
case JS_FRAME:
return 3;
@ -1421,6 +1443,8 @@ const char* Translation::StringFor(Opcode opcode) {
return "JS_FRAME";
case ARGUMENTS_ADAPTOR_FRAME:
return "ARGUMENTS_ADAPTOR_FRAME";
case CONSTRUCT_STUB_FRAME:
return "CONSTRUCT_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
@ -1476,6 +1500,7 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::BEGIN:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME:
// Peeled off before getting here.
break;
@ -1598,10 +1623,13 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo::DeoptimizedFrameInfo(
Deoptimizer* deoptimizer, int frame_index, bool has_arguments_adaptor) {
DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
int frame_index,
bool has_arguments_adaptor,
bool has_construct_stub) {
FrameDescription* output_frame = deoptimizer->output_[frame_index];
SetFunction(output_frame->GetFunction());
function_ = output_frame->GetFunction();
has_construct_stub_ = has_construct_stub;
expression_count_ = output_frame->GetExpressionCount();
expression_stack_ = new Object*[expression_count_];
// Get the source position using the unoptimized code.
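Note: with the new CONSTRUCT_STUB_FRAME opcode, an optimized function that inlines a `new` call records the construct stub as its own frame in the translation, so the deoptimizer can rebuild the stub's frame (and its context slot) between caller and callee. A hedged sketch of the resulting command sequence, using only the Begin* methods shown in this file; the identifiers and the setup of the translation object are illustrative, not taken from this commit:

// Caller f() inlined `new g(...)`: outermost frame first, then the
// construct stub frame, then the inlined constructor's JS frame.
translation.BeginJSFrame(f_ast_id, f_literal_id, f_height);
translation.BeginConstructStubFrame(g_literal_id, construct_stub_height);
translation.BeginJSFrame(g_ast_id, g_literal_id, g_height);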

23
deps/v8/src/deoptimizer.h

@ -283,6 +283,8 @@ class Deoptimizer : public Malloced {
void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index);
void DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@ -431,6 +433,9 @@ class FrameDescription {
intptr_t GetFp() const { return fp_; }
void SetFp(intptr_t fp) { fp_ = fp; }
intptr_t GetContext() const { return context_; }
void SetContext(intptr_t context) { context_ = context; }
Smi* GetState() const { return state_; }
void SetState(Smi* state) { state_ = state; }
@ -492,6 +497,7 @@ class FrameDescription {
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
intptr_t context_;
StackFrame::Type type_;
Smi* state_;
#ifdef DEBUG
@ -556,6 +562,7 @@ class Translation BASE_EMBEDDED {
enum Opcode {
BEGIN,
JS_FRAME,
CONSTRUCT_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
REGISTER,
INT32_REGISTER,
@ -584,6 +591,7 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(int node_id, int literal_id, unsigned height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
@ -716,7 +724,8 @@ class DeoptimizedFrameInfo : public Malloced {
public:
DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
int frame_index,
bool has_arguments_adaptor);
bool has_arguments_adaptor,
bool has_construct_stub);
virtual ~DeoptimizedFrameInfo();
// GC support.
@ -733,6 +742,12 @@ class DeoptimizedFrameInfo : public Malloced {
return function_;
}
// Check if this frame is preceded by construct stub frame. The bottom-most
// inlined frame might still be called by an uninlined construct stub.
bool HasConstructStub() {
return has_construct_stub_;
}
// Get an incoming argument.
Object* GetParameter(int index) {
ASSERT(0 <= index && index < parameters_count());
@ -750,11 +765,6 @@ class DeoptimizedFrameInfo : public Malloced {
}
private:
// Set the frame function.
void SetFunction(JSFunction* function) {
function_ = function;
}
// Set an incoming argument.
void SetParameter(int index, Object* obj) {
ASSERT(0 <= index && index < parameters_count());
@ -768,6 +778,7 @@ class DeoptimizedFrameInfo : public Malloced {
}
JSFunction* function_;
bool has_construct_stub_;
int parameters_count_;
int expression_count_;
Object** parameters_;
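Note: a hedged sketch of how debugger-side code might consume the new flag on a frame obtained from Deoptimizer::DebuggerInspectableFrame (the surrounding runtime function is hypothetical, not part of this commit):

if (info->HasConstructStub()) {
  // The inlined frame sits on top of an uninlined construct stub, i.e. it
  // was reached through `new`; frame inspection must account for the
  // stub's extra frame when walking parameters and expressions.
}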

659
deps/v8/src/elements.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -59,6 +59,53 @@ namespace v8 {
namespace internal {
// First argument in list is the accessor class, the second argument is the
// accessor ElementsKind, and the third is the backing store class. Use the
// fast element handler for smi-only arrays. The implementation is currently
// identical. Note that the order must match that of the ElementsKind enum for
// the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \
V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \
V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \
SeededNumberDictionary) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \
FixedArray) \
V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS, \
ExternalByteArray) \
V(ExternalUnsignedByteElementsAccessor, \
EXTERNAL_UNSIGNED_BYTE_ELEMENTS, ExternalUnsignedByteArray) \
V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS, \
ExternalShortArray) \
V(ExternalUnsignedShortElementsAccessor, \
EXTERNAL_UNSIGNED_SHORT_ELEMENTS, ExternalUnsignedShortArray) \
V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS, \
ExternalIntArray) \
V(ExternalUnsignedIntElementsAccessor, \
EXTERNAL_UNSIGNED_INT_ELEMENTS, ExternalUnsignedIntArray) \
V(ExternalFloatElementsAccessor, \
EXTERNAL_FLOAT_ELEMENTS, ExternalFloatArray) \
V(ExternalDoubleElementsAccessor, \
EXTERNAL_DOUBLE_ELEMENTS, ExternalDoubleArray) \
V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS, ExternalPixelArray)
template<ElementsKind Kind> class ElementsKindTraits {
public:
typedef FixedArrayBase BackingStore;
};
#define ELEMENTS_TRAITS(Class, KindParam, Store) \
template<> class ElementsKindTraits<KindParam> { \
public: \
static const ElementsKind Kind = KindParam; \
typedef Store BackingStore; \
};
ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
ElementsAccessor** ElementsAccessor::elements_accessors_;
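Note: the ELEMENTS_LIST / ElementsKindTraits pairing above is a traits-plus-CRTP arrangement: each accessor subclass passes its traits type to ElementsAccessorBase, which then dispatches statically to the subclass's *Impl methods instead of paying a virtual call on every element access. A self-contained miniature of the pattern, with no V8 types:

#include <cstdio>

template <int KindParam> struct KindTraits {
  static const int Kind = KindParam;
};

// CRTP base: Get() binds to the subclass's GetImpl at compile time.
template <typename Subclass, typename Traits>
class AccessorBase {
 public:
  int Get(int key) const { return Subclass::GetImpl(key); }
  int kind() const { return Traits::Kind; }
};

class FastAccessor : public AccessorBase<FastAccessor, KindTraits<0> > {
 public:
  static int GetImpl(int key) { return key * 2; }
};

int main() {
  FastAccessor a;
  std::printf("kind=%d value=%d\n", a.kind(), a.Get(21));  // kind=0 value=42
  return 0;
}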
@ -84,6 +131,140 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
ASSERT(to_obj->map() != HEAP->fixed_cow_array_map());
ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == -1) {
copy_size = Min(from_obj->length() - from_start,
to_obj->length() - to_start);
}
ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
(copy_size + static_cast<int>(from_start)) <= from_obj->length()));
if (copy_size == 0) return;
Address to = to_obj->address() + FixedArray::kHeaderSize;
Address from = from_obj->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to) + to_start,
reinterpret_cast<Object**>(from) + from_start,
copy_size);
if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
Heap* heap = from_obj->GetHeap();
WriteBarrierMode mode = to_obj->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
heap->RecordWrites(to_obj->address(),
to_obj->OffsetOfElementAt(to_start),
copy_size);
}
heap->incremental_marking()->RecordWrites(to_obj);
}
}
static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
ASSERT(to != from);
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
ASSERT(copy_size == -1 ||
(copy_size + static_cast<int>(to_start)) <= to->length());
WriteBarrierMode mode = to_kind == FAST_ELEMENTS
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
uint32_t copy_limit = (copy_size == -1)
? to->length()
: Min(to_start + copy_size, static_cast<uint32_t>(to->length()));
for (int i = 0; i < from->Capacity(); ++i) {
Object* key = from->KeyAt(i);
if (key->IsNumber()) {
uint32_t entry = static_cast<uint32_t>(key->Number());
if (entry >= to_start && entry < copy_limit) {
Object* value = from->ValueAt(i);
ASSERT(to_kind == FAST_ELEMENTS || value->IsSmi());
to->set(entry, value, mode);
}
}
}
}
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
FixedDoubleArray* from_obj,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
if (copy_size == -1) {
copy_size = Min(from_obj->length() - from_start,
to_obj->length() - to_start);
}
ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
(copy_size + static_cast<int>(from_start)) <= from_obj->length()));
if (copy_size == 0) return from_obj;
for (int i = 0; i < copy_size; ++i) {
if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
UNIMPLEMENTED();
return Failure::Exception();
} else {
MaybeObject* maybe_value = from_obj->get(i + from_start);
Object* value;
ASSERT(to_kind == FAST_ELEMENTS);
// Because the FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS conversion allocates
// HeapObjects one at a time, each allocation must succeed within a single GC cycle,
// otherwise the retry after the GC will also fail. In order to ensure
// that no GC is triggered, allocate HeapNumbers from old space if they
// can't be taken from new space.
if (!maybe_value->ToObject(&value)) {
ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
Heap* heap = from_obj->GetHeap();
MaybeObject* maybe_value_object =
heap->AllocateHeapNumber(from_obj->get_scalar(i + from_start),
TENURED);
if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
}
to_obj->set(i + to_start, value, UPDATE_WRITE_BARRIER);
}
}
return to_obj;
}
static void CopyDoubleToDoubleElements(FixedDoubleArray* from_obj,
uint32_t from_start,
FixedDoubleArray* to_obj,
uint32_t to_start,
int copy_size) {
if (copy_size == -1) {
copy_size = Min(from_obj->length() - from_start,
to_obj->length() - to_start);
}
ASSERT(((copy_size + static_cast<int>(to_start)) <= to_obj->length() &&
(copy_size + static_cast<int>(from_start)) <= from_obj->length()));
if (copy_size == 0) return;
Address to = to_obj->address() + FixedDoubleArray::kHeaderSize;
Address from = from_obj->address() + FixedDoubleArray::kHeaderSize;
to += kDoubleSize * to_start;
from += kDoubleSize * from_start;
int words_per_double = (kDoubleSize / kPointerSize);
CopyWords(reinterpret_cast<Object**>(to),
reinterpret_cast<Object**>(from),
words_per_double * copy_size);
}
// Base class for element handler implementations. Contains the
// the common logic for objects with different ElementsKinds.
// Subclasses must specialize method for which the element
@ -101,37 +282,67 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
// http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). We use
// CRTP to guarantee aggressive compile time optimizations (i.e. inlining and
// specialization of SomeElementsAccessor methods).
template <typename ElementsAccessorSubclass, typename BackingStoreClass>
template <typename ElementsAccessorSubclass,
typename ElementsTraitsParam>
class ElementsAccessorBase : public ElementsAccessor {
protected:
ElementsAccessorBase() { }
virtual MaybeObject* Get(FixedArrayBase* backing_store,
explicit ElementsAccessorBase(const char* name)
: ElementsAccessor(name) { }
typedef ElementsTraitsParam ElementsTraits;
typedef typename ElementsTraitsParam::BackingStore BackingStore;
virtual ElementsKind kind() const { return ElementsTraits::Kind; }
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
BackingStore* backing_store) {
MaybeObject* element =
ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
return !element->IsTheHole();
}
virtual bool HasElement(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
if (backing_store == NULL) {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, BackingStore::cast(backing_store));
}
virtual MaybeObject* Get(Object* receiver,
JSObject* holder,
uint32_t key,
JSObject* obj,
Object* receiver) {
FixedArrayBase* backing_store) {
if (backing_store == NULL) {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetImpl(
BackingStoreClass::cast(backing_store), key, obj, receiver);
receiver, holder, key, BackingStore::cast(backing_store));
}
static MaybeObject* GetImpl(BackingStoreClass* backing_store,
uint32_t key,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
Object* receiver) {
uint32_t key,
BackingStore* backing_store) {
return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
? backing_store->get(key)
: backing_store->GetHeap()->the_hole_value();
}
virtual MaybeObject* SetLength(JSObject* obj,
virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
ASSERT(obj->IsJSArray());
return ElementsAccessorSubclass::SetLengthImpl(
BackingStoreClass::cast(obj->elements()), obj, length);
array, length, BackingStore::cast(array->elements()));
}
static MaybeObject* SetLengthImpl(BackingStoreClass* backing_store,
JSObject* obj,
Object* length);
static MaybeObject* SetLengthImpl(JSObject* obj,
Object* length,
BackingStore* backing_store);
virtual MaybeObject* SetCapacityAndLength(JSArray* array,
int capacity,
@ -153,10 +364,34 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
FixedArray* to,
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
UNREACHABLE();
return NULL;
}
virtual MaybeObject* CopyElements(JSObject* from_holder,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size,
FixedArrayBase* from) {
if (from == NULL) {
from = from_holder->elements();
}
return ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, to_kind, to_start, copy_size);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
JSObject* holder,
Object* receiver) {
FixedArray* to,
FixedArrayBase* from) {
int len0 = to->length();
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
@ -165,7 +400,10 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
#endif
BackingStoreClass* backing_store = BackingStoreClass::cast(from);
if (from == NULL) {
from = holder->elements();
}
BackingStore* backing_store = BackingStore::cast(from);
uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
// Optimize if 'other' is empty.
@ -173,15 +411,15 @@ class ElementsAccessorBase : public ElementsAccessor {
if (len1 == 0) return to;
// Compute how many elements are not in other.
int extra = 0;
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
if (ElementsAccessorSubclass::HasElementAtIndexImpl(
backing_store, y, holder, receiver)) {
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
if (ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, backing_store)) {
MaybeObject* maybe_value =
ElementsAccessorSubclass::GetImpl(backing_store, key,
holder, receiver);
ElementsAccessorSubclass::GetImpl(receiver, holder,
key, backing_store);
Object* value;
if (!maybe_value->ToObject(&value)) return maybe_value;
ASSERT(!value->IsTheHole());
@ -210,15 +448,15 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
// Fill in the extra values.
int index = 0;
uint32_t index = 0;
for (uint32_t y = 0; y < len1; y++) {
if (ElementsAccessorSubclass::HasElementAtIndexImpl(
backing_store, y, holder, receiver)) {
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
if (ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, backing_store)) {
MaybeObject* maybe_value =
ElementsAccessorSubclass::GetImpl(backing_store, key,
holder, receiver);
ElementsAccessorSubclass::GetImpl(receiver, holder,
key, backing_store);
Object* value;
if (!maybe_value->ToObject(&value)) return maybe_value;
if (!value->IsTheHole() && !HasKey(to, value)) {
@ -232,43 +470,24 @@ class ElementsAccessorBase : public ElementsAccessor {
}
protected:
static uint32_t GetCapacityImpl(BackingStoreClass* backing_store) {
static uint32_t GetCapacityImpl(BackingStore* backing_store) {
return backing_store->length();
}
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
return ElementsAccessorSubclass::GetCapacityImpl(
BackingStoreClass::cast(backing_store));
}
static bool HasElementAtIndexImpl(BackingStoreClass* backing_store,
uint32_t index,
JSObject* holder,
Object* receiver) {
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
MaybeObject* element =
ElementsAccessorSubclass::GetImpl(backing_store, key, holder, receiver);
return !element->IsTheHole();
}
virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
uint32_t index,
JSObject* holder,
Object* receiver) {
return ElementsAccessorSubclass::HasElementAtIndexImpl(
BackingStoreClass::cast(backing_store), index, holder, receiver);
BackingStore::cast(backing_store));
}
static uint32_t GetKeyForIndexImpl(BackingStoreClass* backing_store,
static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
uint32_t index) {
return index;
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
uint32_t index) {
uint32_t index) {
return ElementsAccessorSubclass::GetKeyForIndexImpl(
BackingStoreClass::cast(backing_store), index);
BackingStore::cast(backing_store), index);
}
private:
@ -278,12 +497,18 @@ class ElementsAccessorBase : public ElementsAccessor {
// Super class for all fast element arrays.
template<typename FastElementsAccessorSubclass,
typename BackingStore,
typename KindTraits,
int ElementSize>
class FastElementsAccessor
: public ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore> {
: public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
public:
explicit FastElementsAccessor(const char* name)
: ElementsAccessorBase<FastElementsAccessorSubclass,
KindTraits>(name) {}
protected:
friend class ElementsAccessorBase<FastElementsAccessorSubclass, BackingStore>;
friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
typedef typename KindTraits::BackingStore BackingStore;
// Adjusts the length of the fast backing store and returns the new length, or
// returns undefined if the store should be converted to a slow backing store.
@ -338,9 +563,14 @@ class FastElementsAccessor
class FastObjectElementsAccessor
: public FastElementsAccessor<FastObjectElementsAccessor,
FixedArray,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize> {
public:
explicit FastObjectElementsAccessor(const char* name)
: FastElementsAccessor<FastObjectElementsAccessor,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>(name) {}
static MaybeObject* DeleteCommon(JSObject* obj,
uint32_t key) {
ASSERT(obj->HasFastElements() ||
@ -387,6 +617,28 @@ class FastObjectElementsAccessor
return heap->true_value();
}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
CopyObjectToObjectElements(
&no_gc, FixedArray::cast(from), ElementsTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
}
default:
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
}
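The pattern visible in CopyElements/CopyElementsImpl is worth spelling out: ElementsAccessorBase exposes a virtual CopyElements that forwards to the concrete accessor's static CopyElementsImpl through the ElementsAccessorSubclass template parameter, so the per-kind copy loop is reached with a single virtual call. A standalone sketch of that CRTP dispatch (hypothetical names, not part of this commit, not the real v8 classes):

#include <cstdio>

// Base is parameterized by its own subclass so the virtual entry point can
// forward to the subclass's *static* Impl function without another
// virtual call.
template <typename Subclass>
class AccessorBase {
 public:
  virtual ~AccessorBase() {}
  virtual void CopyElements(int from_start, int to_start, int copy_size) {
    Subclass::CopyElementsImpl(from_start, to_start, copy_size);
  }
 protected:
  // Fallback that concrete accessors are expected to shadow.
  static void CopyElementsImpl(int, int, int) { std::printf("unreachable\n"); }
};

class FastAccessor : public AccessorBase<FastAccessor> {
 public:
  static void CopyElementsImpl(int from_start, int to_start, int copy_size) {
    std::printf("copy %d elements from %d to %d\n",
                copy_size, from_start, to_start);
  }
};

int main() {
  FastAccessor accessor;
  AccessorBase<FastAccessor>* base = &accessor;
  base->CopyElements(0, 0, 4);  // resolves to FastAccessor::CopyElementsImpl
  return 0;
}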
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
@ -401,7 +653,7 @@ class FastObjectElementsAccessor
protected:
friend class FastElementsAccessor<FastObjectElementsAccessor,
FixedArray,
ElementsKindTraits<FAST_ELEMENTS>,
kPointerSize>;
virtual MaybeObject* Delete(JSObject* obj,
@ -414,8 +666,14 @@ class FastObjectElementsAccessor
class FastDoubleElementsAccessor
: public FastElementsAccessor<FastDoubleElementsAccessor,
FixedDoubleArray,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize> {
public:
explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<FastDoubleElementsAccessor,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>(name) {}
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
@ -424,11 +682,34 @@ class FastDoubleElementsAccessor
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
FixedDoubleArray>;
ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
friend class FastElementsAccessor<FastDoubleElementsAccessor,
FixedDoubleArray,
ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
kDoubleSize>;
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
return CopyDoubleToObjectElements(
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
return from;
default:
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
}
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@ -441,38 +722,45 @@ class FastDoubleElementsAccessor
return obj->GetHeap()->true_value();
}
static bool HasElementAtIndexImpl(FixedDoubleArray* backing_store,
uint32_t index,
JSObject* holder,
Object* receiver) {
return !backing_store->is_the_hole(index);
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedDoubleArray* backing_store) {
return !backing_store->is_the_hole(key);
}
};
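Much of this diff replaces a raw BackingStore template parameter with an ElementsKindTraits parameter, so each accessor is keyed by its ElementsKind and recovers the backing-store type from the traits (typedef typename KindTraits::BackingStore BackingStore). A simplified model of that traits layout, not part of the commit, with stand-in types instead of the real heap objects:

#include <cstdint>

// Stand-ins for the real backing-store classes.
struct FixedArray       { uint32_t length; };
struct FixedDoubleArray { uint32_t length; };

enum ElementsKind { FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

// One traits specialization per kind; accessor templates only take the kind
// and pull everything else (here just the backing-store type) out of it.
template <ElementsKind Kind> struct KindTraits;
template <> struct KindTraits<FAST_ELEMENTS> {
  typedef FixedArray BackingStore;
};
template <> struct KindTraits<FAST_DOUBLE_ELEMENTS> {
  typedef FixedDoubleArray BackingStore;
};

template <ElementsKind Kind>
struct Accessor {
  typedef typename KindTraits<Kind>::BackingStore BackingStore;
  static uint32_t GetCapacity(BackingStore* store) { return store->length; }
};

int main() {
  FixedDoubleArray doubles = { 8 };
  return Accessor<FAST_DOUBLE_ELEMENTS>::GetCapacity(&doubles) == 8 ? 0 : 1;
}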
// Super class for all external element arrays.
template<typename ExternalElementsAccessorSubclass,
typename ExternalArray>
ElementsKind Kind>
class ExternalElementsAccessor
: public ElementsAccessorBase<ExternalElementsAccessorSubclass,
ExternalArray> {
ElementsKindTraits<Kind> > {
public:
explicit ExternalElementsAccessor(const char* name)
: ElementsAccessorBase<ExternalElementsAccessorSubclass,
ElementsKindTraits<Kind> >(name) {}
protected:
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
ExternalArray>;
ElementsKindTraits<Kind> >;
static MaybeObject* GetImpl(ExternalArray* backing_store,
uint32_t key,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
Object* receiver) {
uint32_t key,
BackingStore* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
? backing_store->get(key)
: backing_store->GetHeap()->undefined_value();
}
static MaybeObject* SetLengthImpl(ExternalArray* backing_store,
JSObject* obj,
Object* length) {
static MaybeObject* SetLengthImpl(JSObject* obj,
Object* length,
BackingStore* backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
@ -484,67 +772,116 @@ class ExternalElementsAccessor
// External arrays always ignore deletes.
return obj->GetHeap()->true_value();
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
BackingStore* backing_store) {
uint32_t capacity =
ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
return key < capacity;
}
};
class ExternalByteElementsAccessor
: public ExternalElementsAccessor<ExternalByteElementsAccessor,
ExternalByteArray> {
EXTERNAL_BYTE_ELEMENTS> {
public:
explicit ExternalByteElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalByteElementsAccessor,
EXTERNAL_BYTE_ELEMENTS>(name) {}
};
class ExternalUnsignedByteElementsAccessor
: public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
ExternalUnsignedByteArray> {
EXTERNAL_UNSIGNED_BYTE_ELEMENTS> {
public:
explicit ExternalUnsignedByteElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
EXTERNAL_UNSIGNED_BYTE_ELEMENTS>(name) {}
};
class ExternalShortElementsAccessor
: public ExternalElementsAccessor<ExternalShortElementsAccessor,
ExternalShortArray> {
EXTERNAL_SHORT_ELEMENTS> {
public:
explicit ExternalShortElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalShortElementsAccessor,
EXTERNAL_SHORT_ELEMENTS>(name) {}
};
class ExternalUnsignedShortElementsAccessor
: public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
ExternalUnsignedShortArray> {
EXTERNAL_UNSIGNED_SHORT_ELEMENTS> {
public:
explicit ExternalUnsignedShortElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
EXTERNAL_UNSIGNED_SHORT_ELEMENTS>(name) {}
};
class ExternalIntElementsAccessor
: public ExternalElementsAccessor<ExternalIntElementsAccessor,
ExternalIntArray> {
EXTERNAL_INT_ELEMENTS> {
public:
explicit ExternalIntElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalIntElementsAccessor,
EXTERNAL_INT_ELEMENTS>(name) {}
};
class ExternalUnsignedIntElementsAccessor
: public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
ExternalUnsignedIntArray> {
EXTERNAL_UNSIGNED_INT_ELEMENTS> {
public:
explicit ExternalUnsignedIntElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
EXTERNAL_UNSIGNED_INT_ELEMENTS>(name) {}
};
class ExternalFloatElementsAccessor
: public ExternalElementsAccessor<ExternalFloatElementsAccessor,
ExternalFloatArray> {
EXTERNAL_FLOAT_ELEMENTS> {
public:
explicit ExternalFloatElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalFloatElementsAccessor,
EXTERNAL_FLOAT_ELEMENTS>(name) {}
};
class ExternalDoubleElementsAccessor
: public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
ExternalDoubleArray> {
EXTERNAL_DOUBLE_ELEMENTS> {
public:
explicit ExternalDoubleElementsAccessor(const char* name)
: ExternalElementsAccessor<ExternalDoubleElementsAccessor,
EXTERNAL_DOUBLE_ELEMENTS>(name) {}
};
class PixelElementsAccessor
: public ExternalElementsAccessor<PixelElementsAccessor,
ExternalPixelArray> {
EXTERNAL_PIXEL_ELEMENTS> {
public:
explicit PixelElementsAccessor(const char* name)
: ExternalElementsAccessor<PixelElementsAccessor,
EXTERNAL_PIXEL_ELEMENTS>(name) {}
};
class DictionaryElementsAccessor
: public ElementsAccessorBase<DictionaryElementsAccessor,
SeededNumberDictionary> {
ElementsKindTraits<DICTIONARY_ELEMENTS> > {
public:
explicit DictionaryElementsAccessor(const char* name)
: ElementsAccessorBase<DictionaryElementsAccessor,
ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
@ -647,9 +984,29 @@ class DictionaryElementsAccessor
return heap->true_value();
}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
switch (to_kind) {
case FAST_SMI_ONLY_ELEMENTS:
case FAST_ELEMENTS:
CopyDictionaryToObjectElements(
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
default:
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
}
protected:
friend class ElementsAccessorBase<DictionaryElementsAccessor,
SeededNumberDictionary>;
ElementsKindTraits<DICTIONARY_ELEMENTS> >;
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
@ -657,10 +1014,10 @@ class DictionaryElementsAccessor
return DeleteCommon(obj, key, mode);
}
static MaybeObject* GetImpl(SeededNumberDictionary* backing_store,
uint32_t key,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
Object* receiver) {
uint32_t key,
SeededNumberDictionary* backing_store) {
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
@ -677,6 +1034,14 @@ class DictionaryElementsAccessor
return obj->GetHeap()->the_hole_value();
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
SeededNumberDictionary* backing_store) {
return backing_store->FindEntry(key) !=
SeededNumberDictionary::kNotFound;
}
static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
uint32_t index) {
Object* key = dict->KeyAt(index);
@ -685,18 +1050,24 @@ class DictionaryElementsAccessor
};
class NonStrictArgumentsElementsAccessor
: public ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
FixedArray> {
class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
NonStrictArgumentsElementsAccessor,
ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
public:
explicit NonStrictArgumentsElementsAccessor(const char* name)
: ElementsAccessorBase<
NonStrictArgumentsElementsAccessor,
ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
protected:
friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
FixedArray>;
friend class ElementsAccessorBase<
NonStrictArgumentsElementsAccessor,
ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
static MaybeObject* GetImpl(FixedArray* parameter_map,
uint32_t key,
static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
Object* receiver) {
Object* probe = GetParameterMapArg(parameter_map, key);
uint32_t key,
FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
Context* context = Context::cast(parameter_map->get(0));
int context_index = Smi::cast(probe)->value();
@ -706,7 +1077,7 @@ class NonStrictArgumentsElementsAccessor
// Object is not mapped, defer to the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
arguments, key, obj, receiver);
receiver, obj, key, arguments);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;
// Elements of the arguments object in slow mode might be slow aliases.
@ -722,9 +1093,9 @@ class NonStrictArgumentsElementsAccessor
}
}
static MaybeObject* SetLengthImpl(FixedArray* parameter_map,
JSObject* obj,
Object* length) {
static MaybeObject* SetLengthImpl(JSObject* obj,
Object* length,
FixedArray* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
@ -735,7 +1106,7 @@ class NonStrictArgumentsElementsAccessor
uint32_t key,
JSReceiver::DeleteMode mode) {
FixedArray* parameter_map = FixedArray::cast(obj->elements());
Object* probe = GetParameterMapArg(parameter_map, key);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
// TODO(kmillikin): We could check if this was the last aliased
// parameter, and revert to normal elements in that case. That
@ -752,6 +1123,19 @@ class NonStrictArgumentsElementsAccessor
return obj->GetHeap()->true_value();
}
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
ElementsKind to_kind,
uint32_t to_start,
int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return accessor->CopyElements(NULL, from_start, to, to_kind,
to_start, copy_size, arguments);
}
static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
return Max(static_cast<uint32_t>(parameter_map->length() - 2),
@ -763,24 +1147,27 @@ class NonStrictArgumentsElementsAccessor
return index;
}
static bool HasElementAtIndexImpl(FixedArray* parameter_map,
uint32_t index,
JSObject* holder,
Object* receiver) {
Object* probe = GetParameterMapArg(parameter_map, index);
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(holder, parameter_map, key);
if (!probe->IsTheHole()) {
return true;
} else {
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return !accessor->Get(arguments, index, holder, receiver)->IsTheHole();
return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
}
}
private:
static Object* GetParameterMapArg(FixedArray* parameter_map,
static Object* GetParameterMapArg(JSObject* holder,
FixedArray* parameter_map,
uint32_t key) {
uint32_t length = parameter_map->length();
uint32_t length = holder->IsJSArray()
? Smi::cast(JSArray::cast(holder)->length())->value()
: parameter_map->length();
return key < (length - 2 )
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
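GetParameterMapArg encodes the layout of a non-strict arguments object's parameter map: slot 0 holds the context, slot 1 the backing arguments store, and slots 2 onward the per-parameter alias entries (the hole when a parameter is unmapped), which is why keys are probed at key + 2. A rough standalone model of that lookup, not part of the commit, with plain ints standing in for heap objects:

#include <vector>

// Toy parameter map: context, arguments store, then one alias slot per
// mapped parameter (-1 plays the role of "the hole").
struct ToyParameterMap {
  int context_id;                // stands in for parameter_map->get(0)
  std::vector<int> arguments;    // stands in for parameter_map->get(1)
  std::vector<int> alias_slots;  // stands in for entries 2..n
};

// Mirrors GetParameterMapArg: keys inside the mapped range probe the alias
// slot; everything else is "the hole".
int GetParameterMapArg(const ToyParameterMap& map, unsigned key) {
  return key < map.alias_slots.size() ? map.alias_slots[key] : -1;
}

int main() {
  ToyParameterMap map = { 1, {10, 20, 30}, {0, -1, 2} };
  int probe = GetParameterMapArg(map, 1);
  // Unmapped parameter: fall back to the arguments store, as GetImpl does.
  return probe == -1 ? map.arguments[1] - 20 : 1;  // returns 0
}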
@ -822,45 +1209,22 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
void ElementsAccessor::InitializeOncePerProcess() {
// First argument in list is the accessor class, the second argument can
// be any arbitrary unique identifier, in this case chosen to be the
// corresponding enum. Use the fast element handler for smi-only arrays.
// The implementation is currently identical. Note that the order must match
// that of the ElementsKind enum for the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS) \
V(FastObjectElementsAccessor, FAST_ELEMENTS) \
V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS) \
V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS) \
V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS) \
V(ExternalUnsignedByteElementsAccessor, EXTERNAL_UNSIGNED_BYTE_ELEMENTS) \
V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS) \
V(ExternalUnsignedShortElementsAccessor, EXTERNAL_UNSIGNED_SHORT_ELEMENTS) \
V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS) \
V(ExternalUnsignedIntElementsAccessor, EXTERNAL_UNSIGNED_INT_ELEMENTS) \
V(ExternalFloatElementsAccessor, EXTERNAL_FLOAT_ELEMENTS) \
V(ExternalDoubleElementsAccessor, EXTERNAL_DOUBLE_ELEMENTS) \
V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS)
static struct ConcreteElementsAccessors {
#define ACCESSOR_STRUCT(Class, Name) Class* Name##_handler;
#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler;
ELEMENTS_LIST(ACCESSOR_STRUCT)
#undef ACCESSOR_STRUCT
} element_accessors = {
#define ACCESSOR_INIT(Class, Name) new Class(),
#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind),
ELEMENTS_LIST(ACCESSOR_INIT)
#undef ACCESSOR_INIT
};
static ElementsAccessor* accessor_array[] = {
#define ACCESSOR_ARRAY(Class, Name) element_accessors.Name##_handler,
#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler,
ELEMENTS_LIST(ACCESSOR_ARRAY)
#undef ACCESSOR_ARRAY
};
#undef ELEMENTS_LIST
STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
kElementsKindCount);
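The ELEMENTS_LIST / ACCESSOR_* block above is an X-macro: the list of (class, kind) pairs is written once and expanded three times, so the struct fields, their initializers, and the lookup table can never drift apart, and the trailing STATIC_ASSERT checks the table length against the enum. A stripped-down sketch of the same technique, not part of the commit, with made-up entries:

#include <cstddef>

enum Kind { KIND_A, KIND_B, KIND_COUNT };

struct HandlerA { int id; };
struct HandlerB { int id; };

// The list is defined once...
#define HANDLER_LIST(V) \
  V(HandlerA, KIND_A)   \
  V(HandlerB, KIND_B)

// ...and expanded three ways: struct fields, initializers, lookup table.
static struct Handlers {
#define FIELD(Class, Kind) Class Kind##_handler;
  HANDLER_LIST(FIELD)
#undef FIELD
} handlers = {
#define INIT(Class, Kind) { Kind },
  HANDLER_LIST(INIT)
#undef INIT
};

static void* handler_table[] = {
#define ENTRY(Class, Kind) &handlers.Kind##_handler,
  HANDLER_LIST(ENTRY)
#undef ENTRY
};

static_assert(sizeof(handler_table) / sizeof(*handler_table) == KIND_COUNT,
              "table must cover every kind");

int main() { return handler_table[KIND_A] == &handlers.KIND_A_handler ? 0 : 1; }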
@ -868,11 +1232,12 @@ void ElementsAccessor::InitializeOncePerProcess() {
}
template <typename ElementsAccessorSubclass, typename BackingStoreClass>
MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
SetLengthImpl(BackingStoreClass* backing_store,
JSObject* obj,
Object* length) {
template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,
typename ElementsKindTraits::BackingStore* backing_store) {
JSArray* array = JSArray::cast(obj);
// Fast case: The new length fits into a Smi.

88
deps/v8/src/elements.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,6 +29,8 @@
#define V8_ELEMENTS_H_
#include "objects.h"
#include "heap.h"
#include "isolate.h"
namespace v8 {
namespace internal {
@ -37,19 +39,38 @@ namespace internal {
// ElementsKinds.
class ElementsAccessor {
public:
ElementsAccessor() { }
explicit ElementsAccessor(const char* name) : name_(name) { }
virtual ~ElementsAccessor() { }
virtual MaybeObject* Get(FixedArrayBase* backing_store,
uint32_t key,
virtual ElementsKind kind() const = 0;
const char* name() const { return name_; }
// Returns true if a holder contains an element with the specified key
// without iterating up the prototype chain. The caller can optionally pass
// in the backing store to use for the check, which must be compatible with
// the ElementsKind of the ElementsAccessor. If backing_store is NULL,
// holder->elements() is used as the backing store.
virtual bool HasElement(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns the element with the specified key or undefined if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, holder->elements() is used as the backing store.
virtual MaybeObject* Get(Object* receiver,
JSObject* holder,
Object* receiver) = 0;
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e. arrays
// that have non-deletable elements can only be shrunk to the size of the
// highest element that is non-deletable.
virtual MaybeObject* SetLength(JSObject* holder,
virtual MaybeObject* SetLength(JSArray* holder,
Object* new_length) = 0;
// Modifies both the length and capacity of a JSArray, resizing the underlying
@ -62,14 +83,34 @@ class ElementsAccessor {
int capacity,
int length) = 0;
// Deletes an element in an object, returning a new elements backing store.
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
FixedArray* to,
// Copy elements from one backing store to another. Typically, callers specify
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is already available, it can be passed as source, in which case
// source_holder is ignored.
virtual MaybeObject* CopyElements(JSObject* source_holder,
uint32_t source_start,
FixedArrayBase* destination,
ElementsKind destination_kind,
uint32_t destination_start,
int copy_size,
FixedArrayBase* source = NULL) = 0;
MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
ElementsKind to_kind,
FixedArrayBase* from = NULL) {
return CopyElements(from_holder, 0, to, to_kind, 0, -1, from);
}
virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
JSObject* holder,
Object* receiver) = 0;
FixedArray* to,
FixedArrayBase* from = NULL) = 0;
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@ -86,28 +127,35 @@ class ElementsAccessor {
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
uint32_t index,
JSObject* holder,
Object* receiver) = 0;
// Element handlers distinguish between indexes and keys when the manipulate
// Element handlers distinguish between indexes and keys when they manipulate
// elements. Indexes refer to elements in terms of their location in the
// underlying storage's backing store representation, and are between 0
// underlying storage's backing store representation, and are between 0 and
// GetCapacity. Keys refer to elements in terms of the value that would be
// specific in JavaScript to access the element. In most implementations, keys
// are equivalent to indexes, and GetKeyForIndex returns the same value it is
// passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps the
// index to a key using the KeyAt method on the NumberDictionary.
// specified in JavaScript to access the element. In most implementations,
// keys are equivalent to indexes, and GetKeyForIndex returns the same value
// it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps
// the index to a key using the KeyAt method on the NumberDictionary.
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
uint32_t index) = 0;
private:
static ElementsAccessor** elements_accessors_;
const char* name_;
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
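The indexes-versus-keys distinction documented in the comment above is easiest to see side by side: for fast, array-like stores the key is the index, while for a dictionary store the index only says where the entry sits and the key has to be read out of the entry (the KeyAt case). A small standalone sketch, not part of the commit and not the real accessors:

#include <cstdint>
#include <utility>
#include <vector>

// Fast store: slot i holds element i, so index and key coincide.
struct FastStore {
  std::vector<int> slots;
  uint32_t GetKeyForIndex(uint32_t index) const { return index; }
};

// Dictionary store: entries live at arbitrary positions; the key (the JS
// element index) is recorded in the entry itself.
struct DictionaryStore {
  std::vector<std::pair<uint32_t, int> > entries;  // (key, value) pairs
  uint32_t GetKeyForIndex(uint32_t index) const { return entries[index].first; }
};

int main() {
  FastStore fast = { {7, 8, 9} };
  DictionaryStore dict;
  dict.entries.push_back(std::make_pair(4000000u, 42));  // one sparse element
  // Index 0 means key 0 in the fast store, but key 4000000 in the dictionary.
  return (fast.GetKeyForIndex(0) == 0 &&
          dict.GetKeyForIndex(0) == 4000000u) ? 0 : 1;
}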
void CopyObjectToObjectElements(AssertNoAllocation* no_gc,
FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
int copy_size);
} } // namespace v8::internal
#endif // V8_ELEMENTS_H_

12
deps/v8/src/execution.cc

@ -376,6 +376,12 @@ void StackGuard::DisableInterrupts() {
}
bool StackGuard::ShouldPostponeInterrupts() {
ExecutionAccess access(isolate_);
return should_postpone_interrupts(access);
}
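ShouldPostponeInterrupts and IsInterrupted both follow the same shape: take the execution lock, then test per-thread state, and HandleStackGuardInterrupt below bails out early while interrupts are postponed. A toy sketch of that flag-bits-plus-postpone idea, not part of the commit, with no real locking or isolates:

#include <cstdio>

// Toy stack guard: interrupt requests are bits, and a postpone flag makes
// the handler return without acting, as HandleStackGuardInterrupt does.
class ToyStackGuard {
 public:
  enum InterruptFlag { INTERRUPT = 1 << 0, GC_REQUEST = 1 << 6 };

  void RequestGC() { interrupt_flags_ |= GC_REQUEST; }
  void SetPostpone(bool postpone) { postpone_ = postpone; }
  bool ShouldPostponeInterrupts() const { return postpone_; }
  bool IsGCRequest() const { return (interrupt_flags_ & GC_REQUEST) != 0; }

  void HandleInterrupts() {
    if (ShouldPostponeInterrupts()) return;  // defer all handling
    if (IsGCRequest()) {
      std::printf("collect garbage\n");
      interrupt_flags_ &= ~GC_REQUEST;       // clear the handled bit
    }
  }

 private:
  int interrupt_flags_ = 0;
  bool postpone_ = false;
};

int main() {
  ToyStackGuard guard;
  guard.RequestGC();
  guard.SetPostpone(true);
  guard.HandleInterrupts();   // postponed: nothing happens
  guard.SetPostpone(false);
  guard.HandleInterrupts();   // now the GC request is serviced
  return 0;
}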
bool StackGuard::IsInterrupted() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
@ -872,9 +878,11 @@ void Execution::ProcessDebugMessages(bool debug_command_only) {
#endif
MaybeObject* Execution::HandleStackGuardInterrupt() {
Isolate* isolate = Isolate::Current();
MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
StackGuard* stack_guard = isolate->stack_guard();
if (stack_guard->ShouldPostponeInterrupts()) {
return isolate->heap()->undefined_value();
}
if (stack_guard->IsGCRequest()) {
isolate->heap()->CollectAllGarbage(false, "StackGuard GC request");

9
deps/v8/src/execution.h

@ -45,6 +45,10 @@ enum InterruptFlag {
GC_REQUEST = 1 << 6
};
class Isolate;
class Execution : public AllStatic {
public:
// Call a function, the caller supplies a receiver and an array
@ -141,7 +145,8 @@ class Execution : public AllStatic {
// If the stack guard is triggered, but it is not an actual
// stack overflow, then handle the interruption accordingly.
MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt();
MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt(
Isolate* isolate);
// Get a function delegate (or undefined) for the given non-function
// object. Used for support calling objects as functions.
@ -158,7 +163,6 @@ class Execution : public AllStatic {
class ExecutionAccess;
class Isolate;
// StackGuard contains the handling of the limits that are used to limit the
@ -222,6 +226,7 @@ class StackGuard {
Address address_of_real_jslimit() {
return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
}
bool ShouldPostponeInterrupts();
private:
StackGuard();

28
deps/v8/src/flag-definitions.h

@ -148,6 +148,7 @@ DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
"crankshaft harvests type feedback from stub cache")
DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_string(trace_phase, "Z", "trace generated IR for specified phases")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
@ -167,30 +168,37 @@ DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
DEFINE_bool(inline_construct, false, "inline constructor calls")
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, false,
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, false, "enable all profiler experiments")
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
DEFINE_int(frame_count, 2, "number of stack frames inspected by the profiler")
DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
DEFINE_bool(self_optimization, false,
"primitive functions trigger their own optimization")
DEFINE_bool(direct_self_opt, false,
"call recompile stub directly when self-optimizing")
DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
DEFINE_bool(count_based_interrupts, false,
"trigger profiler ticks based on counting instead of timing")
DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
DEFINE_int(interrupt_budget, 10000,
DEFINE_int(interrupt_budget, 5900,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 0,
DEFINE_int(type_info_threshold, 40,
"percentage of ICs that must have type info to allow optimization")
DEFINE_int(self_opt_count, 130, "call count before self-optimization")
DEFINE_implication(experimental_profiler, watch_ic_patching)
DEFINE_implication(experimental_profiler, self_optimization)
// Not implying direct_self_opt here because it seems to be a bad idea.
DEFINE_implication(experimental_profiler, retry_self_opt)
DEFINE_implication(experimental_profiler, count_based_interrupts)
DEFINE_implication(experimental_profiler, interrupt_at_exit)
DEFINE_implication(experimental_profiler, weighted_back_edges)
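The DEFINE_implication lines turn one umbrella flag into a bundle: enabling experimental_profiler switches on each listed follow-on flag. A plausible sketch of how such a macro can expand into an "apply implications" pass; this is only the idea, with hypothetical flag variables, not the real flag machinery (which re-expands the definition file in several modes):

#include <cstdio>

// Hypothetical flag storage.
static bool FLAG_experimental_profiler = true;
static bool FLAG_watch_ic_patching = false;
static bool FLAG_self_optimization = false;

// One way to expand DEFINE_implication(whenflag, thenflag): if the premise
// flag is set, force the implied flag on.
#define DEFINE_IMPLICATION(whenflag, thenflag) \
  if (FLAG_##whenflag) FLAG_##thenflag = true;

static void ApplyFlagImplications() {
  DEFINE_IMPLICATION(experimental_profiler, watch_ic_patching)
  DEFINE_IMPLICATION(experimental_profiler, self_optimization)
}

int main() {
  ApplyFlagImplications();
  std::printf("watch_ic_patching=%d self_optimization=%d\n",
              FLAG_watch_ic_patching, FLAG_self_optimization);
  return 0;
}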
@ -485,6 +493,11 @@ DEFINE_bool(print_global_handles, false, "report global handles after GC")
// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
// interface.cc
DEFINE_bool(print_interfaces, false, "print interfaces")
DEFINE_bool(print_interface_details, false, "print interface inference details")
DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
// objects.cc
DEFINE_bool(trace_normalization,
false,
@ -562,6 +575,13 @@ DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
// code-stubs.cc
DEFINE_bool(print_code_stubs, false, "print code stubs")
DEFINE_bool(test_secondary_stub_cache,
false,
"test secondary stub cache by disabling the primary one")
DEFINE_bool(test_primary_stub_cache,
false,
"test primary stub cache by disabling the secondary one")
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(print_code, false, "print generated code")

2
deps/v8/src/frames-inl.h

@ -191,7 +191,7 @@ inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
inline bool StandardFrame::IsConstructFrame(Address fp) {
Object* marker =
Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
return marker == Smi::FromInt(CONSTRUCT);
return marker == Smi::FromInt(StackFrame::CONSTRUCT);
}

49
deps/v8/src/frames.cc

@ -41,6 +41,22 @@
namespace v8 {
namespace internal {
static ReturnAddressLocationResolver return_address_location_resolver = NULL;
// Resolves pc_address through the resolution address function if one is set.
static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
if (return_address_location_resolver == NULL) {
return pc_address;
} else {
return reinterpret_cast<Address*>(
return_address_location_resolver(
reinterpret_cast<uintptr_t>(pc_address)));
}
}
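ResolveReturnAddressLocation adds one level of indirection for profilers that rewrite return addresses on the stack: if a resolver callback has been registered, every place the stack walker is about to read a return-address slot first asks the callback where the original address was stashed. A standalone sketch of that hook with invented names, not part of the commit:

#include <cassert>
#include <cstdint>

typedef unsigned char* Address;
typedef uintptr_t (*ReturnAddressResolver)(uintptr_t location_of_slot);

static ReturnAddressResolver resolver = 0;

// Stack walkers call this on every return-address slot they touch; without
// a registered resolver it is a no-op, like the function above.
static Address* Resolve(Address* pc_address) {
  if (resolver == 0) return pc_address;
  return reinterpret_cast<Address*>(
      resolver(reinterpret_cast<uintptr_t>(pc_address)));
}

// A toy profiler that moved the real slot elsewhere and can map the patched
// location back to it.
static Address stashed_slot = 0;
static uintptr_t ToyResolver(uintptr_t /*patched_location*/) {
  return reinterpret_cast<uintptr_t>(&stashed_slot);
}

int main() {
  Address patched_slot = 0;
  assert(Resolve(&patched_slot) == &patched_slot);  // no resolver yet
  resolver = ToyResolver;
  assert(Resolve(&patched_slot) == &stashed_slot);  // redirected to the stash
  return 0;
}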
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED {
@ -155,8 +171,8 @@ void StackFrameIterator::Reset() {
ASSERT(fp_ != NULL);
state.fp = fp_;
state.sp = sp_;
state.pc_address =
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
state.pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
type = StackFrame::ComputeType(isolate(), &state);
}
if (SingletonFor(type) == NULL) return;
@ -414,6 +430,13 @@ void StackFrame::IteratePc(ObjectVisitor* v,
}
void StackFrame::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver) {
ASSERT(return_address_location_resolver == NULL);
return_address_location_resolver = resolver;
}
StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
ASSERT(state->fp != NULL);
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
@ -488,8 +511,8 @@ void ExitFrame::ComputeCallerState(State* state) const {
// Set up the caller state.
state->sp = caller_sp();
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address
= reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
}
@ -523,7 +546,8 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(sp - 1 * kPointerSize));
}
@ -558,7 +582,8 @@ int StandardFrame::ComputeExpressionsCount() const {
void StandardFrame::ComputeCallerState(State* state) const {
state->sp = caller_sp();
state->fp = caller_fp();
state->pc_address = reinterpret_cast<Address*>(ComputePCAddress(fp()));
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
}
@ -818,14 +843,11 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
// We create the summary in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
bool is_constructor = IsConstructor();
int i = jsframe_count;
while (i > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::JS_FRAME) {
// We don't inline constructor calls, so only the first, outermost
// frame can be a constructor frame in case of inlining.
bool is_constructor = (i == jsframe_count) && IsConstructor();
i--;
int ast_id = it.Next();
int function_id = it.Next();
@ -875,11 +897,18 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
frames->Add(summary);
is_constructor = false;
} else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
// The next encountered JS_FRAME will be marked as a constructor call.
it.Skip(Translation::NumberOfOperandsFor(opcode));
ASSERT(!is_constructor);
is_constructor = true;
} else {
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
}
}
ASSERT(!is_constructor);
}

5
deps/v8/src/frames.h

@ -241,6 +241,11 @@ class StackFrame BASE_EMBEDDED {
virtual void Iterate(ObjectVisitor* v) const = 0;
static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
// Sets a callback function for return-address rewriting profilers
// to resolve the location of a return address to the location of the
// profiler's stashed return address.
static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver);
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };

73
deps/v8/src/full-codegen.cc

@ -55,10 +55,22 @@ void BreakableStatementChecker::VisitVariableDeclaration(
VariableDeclaration* decl) {
}
void BreakableStatementChecker::VisitFunctionDeclaration(
FunctionDeclaration* decl) {
}
void BreakableStatementChecker::VisitModuleDeclaration(
ModuleDeclaration* decl) {
}
void BreakableStatementChecker::VisitImportDeclaration(
ImportDeclaration* decl) {
}
void BreakableStatementChecker::VisitExportDeclaration(
ExportDeclaration* decl) {
}
void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
}
@ -569,29 +581,28 @@ void FullCodeGenerator::VisitDeclarations(
isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
int length = declarations->length();
for (int j = 0, i = 0; i < length; i++) {
VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration();
if (decl != NULL) {
Variable* var = decl->proxy()->var();
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
if (var->binding_needs_init()) {
// In case this binding needs initialization use the hole.
array->set_the_hole(j++);
} else {
array->set_undefined(j++);
}
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
if (fun_decl == NULL) {
if (var->binding_needs_init()) {
// In case this binding needs initialization use the hole.
array->set_the_hole(j++);
} else {
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(decl->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) {
SetStackOverflow();
return;
}
array->set(j++, *function);
array->set_undefined(j++);
}
} else {
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(fun_decl->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) {
SetStackOverflow();
return;
}
array->set(j++, *function);
}
}
}
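The rewritten loop above fills a flat array of 2 * global_count slots in (name, value) pairs: the value slot is the hole when the binding still needs initialization, undefined for a plain variable, or the compiled SharedFunctionInfo for a function declaration. A rough model of that layout, not part of the commit, with strings and an enum in place of heap objects:

#include <string>
#include <vector>

// What can sit in the "value" half of a (name, value) pair.
enum DeclValue { THE_HOLE, UNDEFINED, FUNCTION_INFO };

struct Pair { std::string name; DeclValue value; };

// Mirrors the loop: one pair per unallocated global declaration.
static std::vector<Pair> BuildGlobalDeclarationPairs() {
  std::vector<Pair> pairs;
  pairs.push_back({"x", THE_HOLE});       // binding_needs_init(): the hole
  pairs.push_back({"y", UNDEFINED});      // plain variable declaration
  pairs.push_back({"f", FUNCTION_INFO});  // function declaration
  return pairs;
}

int main() {
  // The runtime later walks the flat array two slots at a time, which is why
  // the real loop advances j by two per declaration.
  return BuildGlobalDeclarationPairs().size() == 3 ? 0 : 1;
}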
@ -605,11 +616,26 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), NULL);
}
void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), NULL);
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
EmitDeclaration(decl->proxy(), decl->mode(), NULL);
}
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
// TODO(rossberg)
}
@ -1133,6 +1159,10 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
Label test, body;
Iteration loop_statement(this, stmt);
// Set statement position for a break slot before entering the for-body.
SetStatementPosition(stmt);
if (stmt->init() != NULL) {
Visit(stmt->init());
}
@ -1147,7 +1177,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
__ bind(loop_statement.continue_label());
SetStatementPosition(stmt);
if (stmt->next() != NULL) {
Visit(stmt->next());
}

3
deps/v8/src/full-codegen.h

@ -437,6 +437,9 @@ class FullCodeGenerator: public AstVisitor {
// the offset of the start of the table.
unsigned EmitStackCheckTable();
void EmitProfilingCounterDecrement(int delta);
void EmitProfilingCounterReset();
// Platform-specific return sequence
void EmitReturnSequence();

3
deps/v8/src/global-handles.cc

@ -384,6 +384,7 @@ GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
number_of_weak_handles_(0),
number_of_global_object_weak_handles_(0),
number_of_global_handles_(0),
first_block_(NULL),
first_used_block_(NULL),
first_free_(NULL),
@ -403,6 +404,7 @@ GlobalHandles::~GlobalHandles() {
Handle<Object> GlobalHandles::Create(Object* value) {
isolate_->counters()->global_handles()->Increment();
number_of_global_handles_++;
if (first_free_ == NULL) {
first_block_ = new NodeBlock(first_block_);
first_block_->PutNodesOnFreeList(&first_free_);
@ -423,6 +425,7 @@ Handle<Object> GlobalHandles::Create(Object* value) {
void GlobalHandles::Destroy(Object** location) {
isolate_->counters()->global_handles()->Decrement();
number_of_global_handles_--;
if (location == NULL) return;
Node::FromLocation(location)->Release(this);
}

8
deps/v8/src/global-handles.h

@ -143,6 +143,11 @@ class GlobalHandles {
return number_of_global_object_weak_handles_;
}
// Returns the current number of handles to global objects.
int NumberOfGlobalHandles() {
return number_of_global_handles_;
}
// Clear the weakness of a global handle.
void ClearWeakness(Object** location);
@ -248,6 +253,9 @@ class GlobalHandles {
// number_of_weak_handles_.
int number_of_global_object_weak_handles_;
// Field always containing the number of handles to global objects.
int number_of_global_handles_;
// List of all allocated node blocks.
NodeBlock* first_block_;

16
deps/v8/src/globals.h

@ -175,27 +175,27 @@ typedef byte* Address;
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#if defined(_MSC_VER)
#define V8_UINT64_C(x) (x ## UI64)
#define V8_INT64_C(x) (x ## I64)
#define V8_INTPTR_C(x) (x ## I64)
#define V8_PTR_PREFIX "ll"
#else // _MSC_VER
#elif defined(__MINGW64__)
#define V8_UINT64_C(x) (x ## ULL)
#define V8_INT64_C(x) (x ## LL)
#define V8_INTPTR_C(x) (x ## LL)
#define V8_PTR_PREFIX "I64"
#else
#define V8_UINT64_C(x) (x ## UL)
#define V8_INT64_C(x) (x ## L)
#define V8_INTPTR_C(x) (x ## L)
#define V8_PTR_PREFIX "l"
#endif // _MSC_VER
#endif
#else // V8_HOST_ARCH_64_BIT
#define V8_INTPTR_C(x) (x)
#define V8_PTR_PREFIX ""
#endif // V8_HOST_ARCH_64_BIT
#ifdef __MINGW64__
#undef V8_PTR_PREFIX
#define V8_PTR_PREFIX "I64"
#endif // __MINGW64__
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
// write V8_2PART_UINT64_C(0x12345678,90123456);
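A minimal sketch of how a two-part constant macro like the one described above can be built on top of a 64-bit suffix macro, so the same source line compiles on toolchains with different 64-bit literal suffixes. The names here are made up, not the real v8 definitions:

#include <cstdint>

// Suffix macro in the spirit of V8_UINT64_C (here hardwired to ULL).
#define MY_UINT64_C(x) (x##ULL)

// Combine a high and a low 32-bit half; the low half is written without the
// 0x prefix, which the macro pastes back on.
#define MY_2PART_UINT64_C(a, b) ((MY_UINT64_C(a) << 32) + 0x##b##u)

static_assert(MY_2PART_UINT64_C(0x12345678, 90123456) == 0x1234567890123456ULL,
              "halves must reassemble into the full constant");

int main() { return 0; }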

9
deps/v8/src/heap-inl.h

@ -32,6 +32,7 @@
#include "isolate.h"
#include "list-inl.h"
#include "objects.h"
#include "platform.h"
#include "v8-counters.h"
#include "store-buffer.h"
#include "store-buffer-inl.h"
@ -658,15 +659,15 @@ double TranscendentalCache::SubCache::Calculate(double input) {
case ATAN:
return atan(input);
case COS:
return cos(input);
return fast_cos(input);
case EXP:
return exp(input);
case LOG:
return log(input);
return fast_log(input);
case SIN:
return sin(input);
return fast_sin(input);
case TAN:
return tan(input);
return fast_tan(input);
default:
return 0.0; // Never happens.
}
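For context, the Calculate switch above is the slow path of a transcendental cache; the change only swaps the libm calls for fast_* variants. The caching idea itself, in a deliberately simplified form that is not part of the commit (a hash map keyed on the argument's bit pattern, rather than the real small fixed-size table):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <unordered_map>

// Memoize cos() results keyed by the exact bit pattern of the input, so a
// repeated argument never reaches libm twice.
class TinyTranscendentalCache {
 public:
  double Cos(double input) {
    uint64_t bits;
    std::memcpy(&bits, &input, sizeof bits);  // exact-bits key
    auto it = cache_.find(bits);
    if (it != cache_.end()) return it->second;
    double result = std::cos(input);
    cache_.insert(std::make_pair(bits, result));
    return result;
  }
 private:
  std::unordered_map<uint64_t, double> cache_;
};

int main() {
  TinyTranscendentalCache cache;
  double first = cache.Cos(0.5);
  double second = cache.Cos(0.5);  // served from the cache
  return first == second ? 0 : 1;
}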

72
deps/v8/src/heap.cc

@ -499,7 +499,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR &&
!mark_compact_collector()->PreciseSweepingRequired() &&
!mark_compact_collector()->abort_incremental_marking_ &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
@ -578,6 +578,17 @@ static void VerifySymbolTable() {
}
static bool AbortIncrementalMarkingAndCollectGarbage(
Heap* heap,
AllocationSpace space,
const char* gc_reason = NULL) {
heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
bool result = heap->CollectGarbage(space, gc_reason);
heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
return result;
}
void Heap::ReserveSpace(
int new_space_size,
int pointer_space_size,
@ -604,28 +615,28 @@ void Heap::ReserveSpace(
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
Heap::CollectGarbage(OLD_POINTER_SPACE,
"failed to reserve space in the old pointer space");
AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
"failed to reserve space in the old pointer space");
gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
Heap::CollectGarbage(OLD_DATA_SPACE,
"failed to reserve space in the old data space");
AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
"failed to reserve space in the old data space");
gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
Heap::CollectGarbage(CODE_SPACE,
"failed to reserve space in the code space");
AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
"failed to reserve space in the code space");
gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
Heap::CollectGarbage(MAP_SPACE,
"failed to reserve space in the map space");
AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
"failed to reserve space in the map space");
gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
Heap::CollectGarbage(CELL_SPACE,
"failed to reserve space in the cell space");
AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
"failed to reserve space in the cell space");
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
@ -637,8 +648,8 @@ void Heap::ReserveSpace(
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
Heap::CollectGarbage(LO_SPACE,
"failed to reserve space in the large object space");
AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
"failed to reserve space in the large object space");
gc_performed = true;
}
}
@ -1909,11 +1920,10 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
MaybeObject* Heap::AllocateCodeCache() {
Object* result;
{ MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
if (!maybe_result->ToObject(&result)) return maybe_result;
CodeCache* code_cache;
{ MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
}
CodeCache* code_cache = CodeCache::cast(result);
code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
return code_cache;
@ -1926,22 +1936,20 @@ MaybeObject* Heap::AllocatePolymorphicCodeCache() {
MaybeObject* Heap::AllocateAccessorPair() {
Object* result;
{ MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE);
if (!maybe_result->ToObject(&result)) return maybe_result;
AccessorPair* accessors;
{ MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
if (!maybe_accessors->To(&accessors)) return maybe_accessors;
}
AccessorPair* accessors = AccessorPair::cast(result);
// Later we will have to distinguish between undefined and the hole...
// accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
// accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
return accessors;
}
MaybeObject* Heap::AllocateTypeFeedbackInfo() {
TypeFeedbackInfo* info;
{ MaybeObject* maybe_result = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
if (!maybe_result->To(&info)) return maybe_result;
{ MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
if (!maybe_info->To(&info)) return maybe_info;
}
info->set_ic_total_count(0);
info->set_ic_with_typeinfo_count(0);
@ -1953,8 +1961,8 @@ MaybeObject* Heap::AllocateTypeFeedbackInfo() {
MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
AliasedArgumentsEntry* entry;
{ MaybeObject* maybe_result = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
if (!maybe_result->To(&entry)) return maybe_result;
{ MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
if (!maybe_entry->To(&entry)) return maybe_entry;
}
entry->set_aliased_context_slot(aliased_context_slot);
return entry;
@ -6921,14 +6929,18 @@ void Heap::FreeQueuedChunks() {
// pieces and initialize size, owner and flags field of every piece.
// If FromAnyPointerAddress encounters a slot that belongs to one of
// these smaller pieces it will treat it as a slot on a normal Page.
Address chunk_end = chunk->address() + chunk->size();
MemoryChunk* inner = MemoryChunk::FromAddress(
chunk->address() + Page::kPageSize);
MemoryChunk* inner_last = MemoryChunk::FromAddress(
chunk->address() + chunk->size() - 1);
MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
while (inner <= inner_last) {
// Size of a large chunk is always a multiple of
// OS::AllocateAlignment() so there is always
// enough space for a fake MemoryChunk header.
Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
// Guard against overflow.
if (area_end < inner->address()) area_end = chunk_end;
inner->SetArea(inner->address(), area_end);
inner->set_size(Page::kPageSize);
inner->set_owner(lo_space());
inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

20
deps/v8/src/heap.h

@ -150,7 +150,8 @@ namespace internal {
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@ -1040,8 +1041,14 @@ class Heap {
const char* gc_reason = NULL);
static const int kNoGCFlags = 0;
static const int kMakeHeapIterableMask = 1;
static const int kSweepPreciselyMask = 1;
static const int kReduceMemoryFootprintMask = 2;
static const int kAbortIncrementalMarkingMask = 4;
// Making the heap iterable requires us to sweep precisely and abort any
// incremental marking as well.
static const int kMakeHeapIterableMask =
kSweepPreciselyMask | kAbortIncrementalMarkingMask;
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
// non-zero, then the slower precise sweeper is used, which leaves the heap
@ -1341,6 +1348,10 @@ class Heap {
return old_gen_allocation_limit_ - PromotedTotalSize();
}
inline intptr_t OldGenerationCapacityAvailable() {
return max_old_generation_size_ - PromotedTotalSize();
}
static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
static const intptr_t kMinimumAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
@ -1567,6 +1578,11 @@ class Heap {
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void SetConstructStubDeoptPCOffset(int pc_offset) {
ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
private:
Heap();

168
deps/v8/src/hydrogen-instructions.cc

@ -285,6 +285,14 @@ HUseListNode* HUseListNode::tail() {
}
bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(f)) return false;
}
return true;
}
HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
Advance();
}
@ -495,9 +503,9 @@ void HValue::RegisterUse(int index, HValue* new_value) {
}
void HValue::AddNewRange(Range* r) {
if (!HasRange()) ComputeInitialRange();
if (!HasRange()) range_ = new Range();
void HValue::AddNewRange(Range* r, Zone* zone) {
if (!HasRange()) ComputeInitialRange(zone);
if (!HasRange()) range_ = new(zone) Range();
ASSERT(HasRange());
r->StackUpon(range_);
range_ = r;
@ -511,9 +519,9 @@ void HValue::RemoveLastAddedRange() {
}
void HValue::ComputeInitialRange() {
void HValue::ComputeInitialRange(Zone* zone) {
ASSERT(!HasRange());
range_ = InferRange();
range_ = InferRange(zone);
ASSERT(HasRange());
}
@ -831,12 +839,12 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
HValue* HConstant::Canonicalize() {
return HasNoUses() && !IsBlockEntry() ? NULL : this;
return HasNoUses() ? NULL : this;
}
HValue* HTypeof::Canonicalize() {
return HasNoUses() && !IsBlockEntry() ? NULL : this;
return HasNoUses() ? NULL : this;
}
@ -858,6 +866,20 @@ HValue* HBitwise::Canonicalize() {
}
HValue* HAdd::Canonicalize() {
if (!representation().IsInteger32()) return this;
if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
return this;
}
HValue* HSub::Canonicalize() {
if (!representation().IsInteger32()) return this;
if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
return this;
}
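The two Canonicalize methods above drop the overflow check when every use of the add or subtract truncates its input to int32 (kTruncatingToInt32): two's-complement wraparound already produces the correct low 32 bits, so a consumer that only looks at the truncated value cannot tell whether the ideal result overflowed. A small standalone demonstration of that equivalence, not part of the commit:

#include <cassert>
#include <cstdint>

// Add as the generated code would after the overflow check is removed: the
// result is simply the low 32 bits of the mathematical sum.
static int32_t TruncatingAdd(int32_t a, int32_t b) {
  return static_cast<int32_t>(static_cast<uint32_t>(a) +
                              static_cast<uint32_t>(b));
}

int main() {
  // 2e9 + 2e9 overflows int32, but its low 32 bits are still well defined.
  int64_t exact = static_cast<int64_t>(2000000000) + 2000000000;
  int32_t truncated_exact = static_cast<int32_t>(static_cast<uint32_t>(exact));
  assert(TruncatingAdd(2000000000, 2000000000) == truncated_exact);
  return 0;
}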
HValue* HChange::Canonicalize() {
return (from().Equals(to())) ? value() : this;
}
@ -986,15 +1008,15 @@ void HInstanceOf::PrintDataTo(StringStream* stream) {
}
Range* HValue::InferRange() {
Range* HValue::InferRange(Zone* zone) {
// Untagged integer32 cannot be -0, all other representations can.
Range* result = new Range();
Range* result = new(zone) Range();
result->set_can_be_minus_zero(!representation().IsInteger32());
return result;
}
Range* HChange::InferRange() {
Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
if (from().IsInteger32() &&
to().IsTagged() &&
@ -1002,46 +1024,46 @@ Range* HChange::InferRange() {
set_type(HType::Smi());
}
Range* result = (input_range != NULL)
? input_range->Copy()
: HValue::InferRange();
? input_range->Copy(zone)
: HValue::InferRange(zone);
if (to().IsInteger32()) result->set_can_be_minus_zero(false);
return result;
}
Range* HConstant::InferRange() {
Range* HConstant::InferRange(Zone* zone) {
if (has_int32_value_) {
Range* result = new Range(int32_value_, int32_value_);
Range* result = new(zone) Range(int32_value_, int32_value_);
result->set_can_be_minus_zero(false);
return result;
}
return HValue::InferRange();
return HValue::InferRange(zone);
}
Range* HPhi::InferRange() {
Range* HPhi::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
if (block()->IsLoopHeader()) {
Range* range = new Range(kMinInt, kMaxInt);
Range* range = new(zone) Range(kMinInt, kMaxInt);
return range;
} else {
Range* range = OperandAt(0)->range()->Copy();
Range* range = OperandAt(0)->range()->Copy(zone);
for (int i = 1; i < OperandCount(); ++i) {
range->Union(OperandAt(i)->range());
}
return range;
}
} else {
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
Range* HAdd::InferRange() {
Range* HAdd::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy();
Range* res = a->Copy(zone);
if (!res->AddAndCheckOverflow(b)) {
ClearFlag(kCanOverflow);
}
@ -1049,32 +1071,32 @@ Range* HAdd::InferRange() {
res->set_can_be_minus_zero(m0);
return res;
} else {
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
Range* HSub::InferRange() {
Range* HSub::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy();
Range* res = a->Copy(zone);
if (!res->SubAndCheckOverflow(b)) {
ClearFlag(kCanOverflow);
}
res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
return res;
} else {
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
Range* HMul::InferRange() {
Range* HMul::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy();
Range* res = a->Copy(zone);
if (!res->MulAndCheckOverflow(b)) {
ClearFlag(kCanOverflow);
}
@ -1083,14 +1105,14 @@ Range* HMul::InferRange() {
res->set_can_be_minus_zero(m0);
return res;
} else {
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
Range* HDiv::InferRange() {
Range* HDiv::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* result = new Range();
Range* result = new(zone) Range();
if (left()->range()->CanBeMinusZero()) {
result->set_can_be_minus_zero(true);
}
@ -1108,15 +1130,15 @@ Range* HDiv::InferRange() {
}
return result;
} else {
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
Range* HMod::InferRange() {
Range* HMod::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* result = new Range();
Range* result = new(zone) Range();
if (a->CanBeMinusZero() || a->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}
@ -1125,7 +1147,7 @@ Range* HMod::InferRange() {
}
return result;
} else {
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
@ -1324,40 +1346,41 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
Range* HBitwise::InferRange() {
if (op() == Token::BIT_XOR) return HValue::InferRange();
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
int32_t left_mask = (left()->range() != NULL)
? left()->range()->Mask()
: 0xffffffff;
: kDefaultMask;
int32_t right_mask = (right()->range() != NULL)
? right()->range()->Mask()
: 0xffffffff;
: kDefaultMask;
int32_t result_mask = (op() == Token::BIT_AND)
? left_mask & right_mask
: left_mask | right_mask;
return (result_mask >= 0)
? new Range(0, result_mask)
: HValue::InferRange();
? new(zone) Range(0, result_mask)
: HValue::InferRange(zone);
}
Range* HSar::InferRange() {
Range* HSar::InferRange(Zone* zone) {
if (right()->IsConstant()) {
HConstant* c = HConstant::cast(right());
if (c->HasInteger32Value()) {
Range* result = (left()->range() != NULL)
? left()->range()->Copy()
: new Range();
? left()->range()->Copy(zone)
: new(zone) Range();
result->Sar(c->Integer32Value());
result->set_can_be_minus_zero(false);
return result;
}
}
return HValue::InferRange();
return HValue::InferRange(zone);
}
Range* HShr::InferRange() {
Range* HShr::InferRange(Zone* zone) {
if (right()->IsConstant()) {
HConstant* c = HConstant::cast(right());
if (c->HasInteger32Value()) {
@ -1365,53 +1388,54 @@ Range* HShr::InferRange() {
if (left()->range()->CanBeNegative()) {
// Only compute bounds if the result always fits into an int32.
return (shift_count >= 1)
? new Range(0, static_cast<uint32_t>(0xffffffff) >> shift_count)
: new Range();
? new(zone) Range(0,
static_cast<uint32_t>(0xffffffff) >> shift_count)
: new(zone) Range();
} else {
// For positive inputs we can use the >> operator.
Range* result = (left()->range() != NULL)
? left()->range()->Copy()
: new Range();
? left()->range()->Copy(zone)
: new(zone) Range();
result->Sar(c->Integer32Value());
result->set_can_be_minus_zero(false);
return result;
}
}
}
return HValue::InferRange();
return HValue::InferRange(zone);
}
Range* HShl::InferRange() {
Range* HShl::InferRange(Zone* zone) {
if (right()->IsConstant()) {
HConstant* c = HConstant::cast(right());
if (c->HasInteger32Value()) {
Range* result = (left()->range() != NULL)
? left()->range()->Copy()
: new Range();
? left()->range()->Copy(zone)
: new(zone) Range();
result->Shl(c->Integer32Value());
result->set_can_be_minus_zero(false);
return result;
}
}
return HValue::InferRange();
return HValue::InferRange(zone);
}
Range* HLoadKeyedSpecializedArrayElement::InferRange() {
Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
switch (elements_kind()) {
case EXTERNAL_PIXEL_ELEMENTS:
return new Range(0, 255);
return new(zone) Range(0, 255);
case EXTERNAL_BYTE_ELEMENTS:
return new Range(-128, 127);
return new(zone) Range(-128, 127);
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return new Range(0, 255);
return new(zone) Range(0, 255);
case EXTERNAL_SHORT_ELEMENTS:
return new Range(-32768, 32767);
return new(zone) Range(-32768, 32767);
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return new Range(0, 65535);
return new(zone) Range(0, 65535);
default:
return HValue::InferRange();
return HValue::InferRange(zone);
}
}
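
These per-kind bounds are simply the value ranges of the underlying element types. A small standalone check (illustration only):

#include <cassert>
#include <cstdint>
#include <limits>

int main() {
  assert(std::numeric_limits<uint8_t>::max()  == 255);    // pixel and unsigned byte elements
  assert(std::numeric_limits<int8_t>::min()   == -128 &&
         std::numeric_limits<int8_t>::max()   == 127);    // byte elements
  assert(std::numeric_limits<int16_t>::min()  == -32768 &&
         std::numeric_limits<int16_t>::max()  == 32767);  // short elements
  assert(std::numeric_limits<uint16_t>::max() == 65535);  // unsigned short elements
  // The 32-bit kinds fall through to the default case: an int32-valued Range cannot
  // bound an unsigned 32-bit load, and a signed 32-bit load already spans the whole range.
  return 0;
}
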
@ -1456,7 +1480,22 @@ void HGoto::PrintDataTo(StringStream* stream) {
void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
input_representation_ = r;
if (r.IsDouble()) {
SetFlag(kDeoptimizeOnUndefined);
// According to the ES5 spec (11.9.3, 11.8.5), equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have different semantics, first
// calling ToPrimitive() on their arguments. The standard Crankshaft
// tagged-to-double conversion that ensures the HCompareIDAndBranch's inputs
// are doubles converts 'undefined' to NaN. That's compatible
// out of the box with ordered relational comparisons (<, >, <=,
// >=). However, for equality comparisons (and for 'in' and 'instanceof'),
// it is not consistent with the spec. For example, it would cause undefined
// == undefined (should be true) to be evaluated as NaN == NaN
// (false). Therefore, any comparisons other than ordered relational
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
}
} else {
ASSERT(r.IsInteger32());
}
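
The deoptimization rule above comes down to IEEE NaN behaviour once 'undefined' has been converted to a double. A small standalone check (illustration only):

#include <cassert>
#include <limits>

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();  // what 'undefined' becomes
  assert(!(nan == nan));   // so undefined == undefined would wrongly evaluate to false
  assert(!(nan < nan) && !(nan <= nan));  // ordered comparisons are false either way, matching
                                          // the spec result for undefined, so they need no deopt
  return 0;
}
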
@ -1923,6 +1962,11 @@ HType HStringCharFromCode::CalculateInferredType() {
}
HType HAllocateObject::CalculateInferredType() {
return HType::JSObject();
}
HType HFastLiteral::CalculateInferredType() {
// TODO(mstarzinger): Be smarter, could also be JSArray here.
return HType::JSObject();

deps/v8/src/hydrogen-instructions.h (155 changed lines)

@ -62,6 +62,7 @@ class LChunkBuilder;
V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@ -173,7 +174,6 @@ class LChunkBuilder;
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(ToInt32) \
V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
@ -184,7 +184,8 @@ class LChunkBuilder;
V(ForInPrepareMap) \
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex)
V(LoadFieldByIndex) \
V(DateField)
#define GVN_FLAG_LIST(V) \
V(Calls) \
@ -235,10 +236,14 @@ class Range: public ZoneObject {
int32_t upper() const { return upper_; }
int32_t lower() const { return lower_; }
Range* next() const { return next_; }
Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
Range* Copy() const {
Range* result = new Range(lower_, upper_);
Range* CopyClearLower(Zone* zone) const {
return new(zone) Range(kMinInt, upper_);
}
Range* CopyClearUpper(Zone* zone) const {
return new(zone) Range(lower_, kMaxInt);
}
Range* Copy(Zone* zone) const {
Range* result = new(zone) Range(lower_, upper_);
result->set_can_be_minus_zero(CanBeMinusZero());
return result;
}
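
The new(zone) expressions introduced throughout this patch rely on the placement-new-into-an-arena idiom: a class-specific operator new that takes the zone as an extra argument. A minimal, self-contained sketch of that idiom, using simplified stand-ins for Zone, ZoneObject and Range rather than V8's real classes:

#include <cstddef>
#include <new>
#include <vector>

// Simplified arena: allocations live until the whole zone is destroyed.
class Zone {
 public:
  ~Zone() { for (void* p : chunks_) ::operator delete(p); }
  void* Allocate(std::size_t size) {
    chunks_.push_back(::operator new(size));  // a real zone bump-allocates large chunks instead
    return chunks_.back();
  }
 private:
  std::vector<void*> chunks_;
};

// Anything derived from this can be created with 'new(zone) T(...)'.
struct ZoneObject {
  void* operator new(std::size_t size, Zone* zone) { return zone->Allocate(size); }
  void operator delete(void*, Zone*) {}  // matching form, only used if a constructor throws
};

struct Range : public ZoneObject {
  Range(int lower, int upper) : lower_(lower), upper_(upper) {}
  int lower_, upper_;
};

int main() {
  Zone zone;
  Range* r = new(&zone) Range(0, 255);  // analogous to 'new(zone) Range(0, 255)' in the patch
  return r->upper_ == 255 ? 0 : 1;      // no explicit delete: the zone reclaims everything
}

Passing the Zone* explicitly, rather than reaching for the isolate's current zone inside each helper, is what most of the signature changes in this file amount to.
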
@ -640,6 +645,9 @@ class HValue: public ZoneObject {
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
// Returns true if the flag specified is set for all uses, false otherwise.
bool CheckUsesForFlag(Flag f);
GVNFlagSet gvn_flags() const { return gvn_flags_; }
void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
@ -682,9 +690,9 @@ class HValue: public ZoneObject {
Range* range() const { return range_; }
bool HasRange() const { return range_ != NULL; }
void AddNewRange(Range* r);
void AddNewRange(Range* r, Zone* zone);
void RemoveLastAddedRange();
void ComputeInitialRange();
void ComputeInitialRange(Zone* zone);
// Representation helpers.
virtual Representation RequiredInputRepresentation(int index) = 0;
@ -729,7 +737,7 @@ class HValue: public ZoneObject {
return false;
}
virtual void RepresentationChanged(Representation to) { }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
virtual void DeleteFromGraph() = 0;
virtual void InternalSetOperandAt(int index, HValue* value) = 0;
void clear_block() {
@ -819,6 +827,8 @@ class HInstruction: public HValue {
bool has_position() const { return position_ != RelocInfo::kNoPosition; }
void set_position(int position) { position_ = position; }
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
@ -1116,10 +1126,6 @@ class HUnaryOperation: public HTemplateInstruction<1> {
return reinterpret_cast<HUnaryOperation*>(value);
}
virtual bool CanTruncateToInt32() const {
return CheckFlag(kTruncatingToInt32);
}
HValue* value() { return OperandAt(0); }
virtual void PrintDataTo(StringStream* stream);
};
@ -1207,7 +1213,7 @@ class HChange: public HUnaryOperation {
return from();
}
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
virtual void PrintDataTo(StringStream* stream);
@ -1237,37 +1243,6 @@ class HClampToUint8: public HUnaryOperation {
};
class HToInt32: public HUnaryOperation {
public:
explicit HToInt32(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
virtual bool CanTruncateToInt32() const {
return true;
}
virtual HValue* Canonicalize() {
if (value()->representation().IsInteger32()) {
return value();
} else {
return this;
}
}
DECLARE_CONCRETE_INSTRUCTION(ToInt32)
protected:
virtual bool DataEquals(HValue* other) { return true; }
};
class HSimulate: public HInstruction {
public:
HSimulate(int ast_id, int pop_count)
@ -1376,11 +1351,13 @@ class HEnterInlined: public HTemplateInstruction<0> {
HEnterInlined(Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
CallKind call_kind)
CallKind call_kind,
bool is_construct)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind) {
call_kind_(call_kind),
is_construct_(is_construct) {
}
virtual void PrintDataTo(StringStream* stream);
@ -1389,6 +1366,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count() const { return arguments_count_; }
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
bool is_construct() const { return is_construct_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@ -1401,6 +1379,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count_;
FunctionLiteral* function_;
CallKind call_kind_;
bool is_construct_;
};
@ -1908,6 +1887,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathLog:
case kMathSin:
case kMathCos:
case kMathTan:
set_representation(Representation::Double());
break;
default:
@ -1938,6 +1918,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathLog:
case kMathSin:
case kMathCos:
case kMathTan:
return Representation::Double();
case kMathAbs:
return representation();
@ -2294,7 +2275,7 @@ class HPhi: public HValue {
return Representation::None();
}
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
@ -2472,7 +2453,7 @@ class HConstant: public HTemplateInstruction<0> {
DECLARE_CONCRETE_INSTRUCTION(Constant)
protected:
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
virtual bool DataEquals(HValue* other) {
HConstant* other_constant = HConstant::cast(other);
@ -3145,12 +3126,14 @@ class HAdd: public HArithmeticBinaryOperation {
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
};
@ -3163,6 +3146,8 @@ class HSub: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual HValue* Canonicalize();
static HInstruction* NewHSub(Zone* zone,
HValue* context,
HValue* left,
@ -3173,7 +3158,7 @@ class HSub: public HArithmeticBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
};
@ -3201,7 +3186,7 @@ class HMul: public HArithmeticBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
};
@ -3234,7 +3219,7 @@ class HMod: public HArithmeticBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
};
@ -3248,7 +3233,6 @@ class HDiv: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
static HInstruction* NewHDiv(Zone* zone,
HValue* context,
HValue* left,
@ -3259,7 +3243,7 @@ class HDiv: public HArithmeticBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
};
@ -3291,7 +3275,7 @@ class HBitwise: public HBitwiseBinaryOperation {
return op() == HBitwise::cast(other)->op();
}
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
private:
Token::Value op_;
@ -3303,7 +3287,7 @@ class HShl: public HBitwiseBinaryOperation {
HShl(HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
static HInstruction* NewHShl(Zone* zone,
HValue* context,
@ -3322,7 +3306,7 @@ class HShr: public HBitwiseBinaryOperation {
HShr(HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
static HInstruction* NewHShr(Zone* zone,
HValue* context,
@ -3341,7 +3325,7 @@ class HSar: public HBitwiseBinaryOperation {
HSar(HValue* context, HValue* left, HValue* right)
: HBitwiseBinaryOperation(context, left, right) { }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
static HInstruction* NewHSar(Zone* zone,
HValue* context,
@ -3931,7 +3915,7 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
virtual Range* InferRange();
virtual Range* InferRange(Zone* zone);
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
@ -4299,8 +4283,8 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange() {
return new Range(0, String::kMaxUC16CharCode);
virtual Range* InferRange(Zone* zone) {
return new(zone) Range(0, String::kMaxUC16CharCode);
}
};
@ -4352,12 +4336,35 @@ class HStringLength: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange() {
return new Range(0, String::kMaxLength);
virtual Range* InferRange(Zone* zone) {
return new(zone) Range(0, String::kMaxLength);
}
};
class HAllocateObject: public HTemplateInstruction<1> {
public:
HAllocateObject(HValue* context, Handle<JSFunction> constructor)
: constructor_(constructor) {
SetOperandAt(0, context);
set_representation(Representation::Tagged());
}
HValue* context() { return OperandAt(0); }
Handle<JSFunction> constructor() { return constructor_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
private:
Handle<JSFunction> constructor_;
};
template <int V>
class HMaterializedLiteral: public HTemplateInstruction<V> {
public:
@ -4596,6 +4603,26 @@ class HValueOf: public HUnaryOperation {
};
class HDateField: public HUnaryOperation {
public:
HDateField(HValue* date, Smi* index)
: HUnaryOperation(date), index_(index) {
set_representation(Representation::Tagged());
}
Smi* index() const { return index_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(DateField)
private:
Smi* index_;
};
class HDeleteProperty: public HBinaryOperation {
public:
HDeleteProperty(HValue* context, HValue* obj, HValue* key)

deps/v8/src/hydrogen.cc (483 changed lines)

@ -600,7 +600,7 @@ HConstant* HGraph::GetConstantHole() {
HGraphBuilder::HGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: function_state_(NULL),
initial_function_state_(this, info, oracle, false),
initial_function_state_(this, info, oracle, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
graph_(NULL),
@ -730,7 +730,7 @@ HBasicBlock* HGraph::CreateBasicBlock() {
void HGraph::Canonicalize() {
if (!FLAG_use_canonicalizing) return;
HPhase phase("Canonicalize", this);
HPhase phase("H_Canonicalize", this);
for (int i = 0; i < blocks()->length(); ++i) {
HInstruction* instr = blocks()->at(i)->first();
while (instr != NULL) {
@ -743,7 +743,7 @@ void HGraph::Canonicalize() {
void HGraph::OrderBlocks() {
HPhase phase("Block ordering");
HPhase phase("H_Block ordering");
BitVector visited(blocks_.length(), zone());
ZoneList<HBasicBlock*> reverse_result(8);
@ -805,7 +805,7 @@ void HGraph::Postorder(HBasicBlock* block,
void HGraph::AssignDominators() {
HPhase phase("Assign dominators", this);
HPhase phase("H_Assign dominators", this);
for (int i = 0; i < blocks_.length(); ++i) {
HBasicBlock* block = blocks_[i];
if (block->IsLoopHeader()) {
@ -824,7 +824,7 @@ void HGraph::AssignDominators() {
// Mark all blocks that are dominated by an unconditional soft deoptimize to
// prevent code motion across those blocks.
void HGraph::PropagateDeoptimizingMark() {
HPhase phase("Propagate deoptimizing mark", this);
HPhase phase("H_Propagate deoptimizing mark", this);
MarkAsDeoptimizingRecursively(entry_block());
}
@ -837,7 +837,7 @@ void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
}
void HGraph::EliminateRedundantPhis() {
HPhase phase("Redundant phi elimination", this);
HPhase phase("H_Redundant phi elimination", this);
// Worklist of phis that can potentially be eliminated. Initialized with
// all phi nodes. When elimination of a phi node modifies another phi node
@ -871,7 +871,7 @@ void HGraph::EliminateRedundantPhis() {
void HGraph::EliminateUnreachablePhis() {
HPhase phase("Unreachable phi elimination", this);
HPhase phase("H_Unreachable phi elimination", this);
// Initialize worklist.
ZoneList<HPhi*> phi_list(blocks_.length());
@ -979,7 +979,8 @@ void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
class HRangeAnalysis BASE_EMBEDDED {
public:
explicit HRangeAnalysis(HGraph* graph) : graph_(graph), changed_ranges_(16) {}
explicit HRangeAnalysis(HGraph* graph) :
graph_(graph), zone_(graph->isolate()->zone()), changed_ranges_(16) { }
void Analyze();
@ -993,6 +994,7 @@ class HRangeAnalysis BASE_EMBEDDED {
void AddRange(HValue* value, Range* range);
HGraph* graph_;
Zone* zone_;
ZoneList<HValue*> changed_ranges_;
};
@ -1008,7 +1010,7 @@ void HRangeAnalysis::TraceRange(const char* msg, ...) {
void HRangeAnalysis::Analyze() {
HPhase phase("Range analysis", graph_);
HPhase phase("H_Range analysis", graph_);
Analyze(graph_->entry_block());
}
@ -1079,14 +1081,14 @@ void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
if (op == Token::EQ || op == Token::EQ_STRICT) {
// The same range has to apply for value.
new_range = range->Copy();
new_range = range->Copy(zone_);
} else if (op == Token::LT || op == Token::LTE) {
new_range = range->CopyClearLower();
new_range = range->CopyClearLower(zone_);
if (op == Token::LT) {
new_range->AddConstant(-1);
}
} else if (op == Token::GT || op == Token::GTE) {
new_range = range->CopyClearUpper();
new_range = range->CopyClearUpper(zone_);
if (op == Token::GT) {
new_range->AddConstant(1);
}
@ -1101,7 +1103,7 @@ void HRangeAnalysis::UpdateControlFlowRange(Token::Value op,
void HRangeAnalysis::InferRange(HValue* value) {
ASSERT(!value->HasRange());
if (!value->representation().IsNone()) {
value->ComputeInitialRange();
value->ComputeInitialRange(zone_);
Range* range = value->range();
TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
value->id(),
@ -1122,7 +1124,7 @@ void HRangeAnalysis::RollBackTo(int index) {
void HRangeAnalysis::AddRange(HValue* value, Range* range) {
Range* original_range = value->range();
value->AddNewRange(range);
value->AddNewRange(range, zone_);
changed_ranges_.Add(value);
Range* new_range = value->range();
TraceRange("Updated range of %d set to [%d,%d]\n",
@ -1483,6 +1485,11 @@ void HGlobalValueNumberer::ComputeBlockSideEffects() {
GVNFlagSet side_effects;
while (instr != NULL) {
side_effects.Add(instr->ChangesFlags());
if (instr->IsSoftDeoptimize()) {
block_side_effects_[id].RemoveAll();
side_effects.RemoveAll();
break;
}
instr = instr->next();
}
block_side_effects_[id].Add(side_effects);
@ -1829,7 +1836,7 @@ Representation HInferRepresentation::TryChange(HValue* value) {
void HInferRepresentation::Analyze() {
HPhase phase("Infer representations", graph_);
HPhase phase("H_Infer representations", graph_);
// (1) Initialize bit vectors and count real uses. Each phi gets a
// bit-vector of length <number of phis>.
@ -1908,7 +1915,7 @@ void HInferRepresentation::Analyze() {
void HGraph::InitializeInferredTypes() {
HPhase phase("Inferring types", this);
HPhase phase("H_Inferring types", this);
InitializeInferredTypes(0, this->blocks_.length() - 1);
}
@ -2045,8 +2052,7 @@ void HGraph::InsertRepresentationChangesForValue(HValue* value) {
void HGraph::InsertRepresentationChanges() {
HPhase phase("Insert representation changes", this);
HPhase phase("H_Representation changes", this);
// Compute truncation flag for phis: Initially assume that all
// int32-phis allow truncation and iteratively remove the ones that
@ -2065,13 +2071,9 @@ void HGraph::InsertRepresentationChanges() {
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
phi->ClearFlag(HValue::kTruncatingToInt32);
change = true;
break;
}
if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
phi->ClearFlag(HValue::kTruncatingToInt32);
change = true;
}
}
}
@ -2106,7 +2108,7 @@ void HGraph::RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi) {
void HGraph::MarkDeoptimizeOnUndefined() {
HPhase phase("MarkDeoptimizeOnUndefined", this);
HPhase phase("H_MarkDeoptimizeOnUndefined", this);
// Compute DeoptimizeOnUndefined flag for phis.
// Any phi that can reach a use with DeoptimizeOnUndefined set must
// have DeoptimizeOnUndefined set. Currently only HCompareIDAndBranch, with
@ -2156,12 +2158,12 @@ void HGraph::ComputeMinusZeroChecks() {
FunctionState::FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
bool drop_extra)
ReturnHandlingFlag return_handling)
: owner_(owner),
compilation_info_(info),
oracle_(oracle),
call_context_(NULL),
drop_extra_(drop_extra),
return_handling_(return_handling),
function_return_(NULL),
test_context_(NULL),
outer_(owner->function_state()) {
@ -2204,7 +2206,7 @@ AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
for_typeof_(false) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
ASSERT(!owner->environment()->is_arguments_adaptor());
ASSERT(owner->environment()->frame_type() == JS_FUNCTION);
original_length_ = owner->environment()->length();
#endif
}
@ -2219,7 +2221,7 @@ EffectContext::~EffectContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ &&
!owner()->environment()->is_arguments_adaptor()));
owner()->environment()->frame_type() == JS_FUNCTION));
}
@ -2227,7 +2229,7 @@ ValueContext::~ValueContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
(owner()->environment()->length() == original_length_ + 1 &&
!owner()->environment()->is_arguments_adaptor()));
owner()->environment()->frame_type() == JS_FUNCTION));
}
@ -2432,7 +2434,7 @@ HGraph* HGraphBuilder::CreateGraph() {
if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
{
HPhase phase("Block building");
HPhase phase("H_Block building");
current_block_ = graph()->entry_block();
Scope* scope = info()->scope();
@ -2466,7 +2468,7 @@ HGraph* HGraphBuilder::CreateGraph() {
// Handle implicit declaration of the function name in named function
// expressions before other declarations.
if (scope->is_function_scope() && scope->function() != NULL) {
HandleVariableDeclaration(scope->function(), CONST, NULL, NULL);
HandleDeclaration(scope->function(), CONST, NULL, NULL);
}
VisitDeclarations(scope->declarations());
AddSimulate(AstNode::kDeclarationsId);
@ -2517,7 +2519,7 @@ HGraph* HGraphBuilder::CreateGraph() {
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
HPhase phase("Global value numbering", graph());
HPhase phase("H_Global value numbering", graph());
HGlobalValueNumberer gvn(graph(), info());
bool removed_side_effects = gvn.Analyze();
// Trigger a second analysis pass to further eliminate duplicate values that
@ -2550,7 +2552,7 @@ HGraph* HGraphBuilder::CreateGraph() {
void HGraph::ReplaceCheckedValues() {
HPhase phase("Replace checked values", this);
HPhase phase("H_Replace checked values", this);
for (int i = 0; i < blocks()->length(); ++i) {
HInstruction* instr = blocks()->at(i)->first();
while (instr != NULL) {
@ -2590,8 +2592,8 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) {
}
template <int V>
HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
template <class Instruction>
HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count);
for (int i = 0; i < count; ++i) {
@ -2819,7 +2821,38 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
current_block()->FinishExit(new(zone()) HReturn(result));
set_current_block(NULL);
} else if (function_state()->is_construct()) {
// Return from an inlined construct call. In a test context the return
// value will always evaluate to true, in a value context the return value
// needs to be a JSObject.
if (context->IsTest()) {
TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
current_block()->Goto(test->if_true(), function_state()->drop_extra());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
current_block()->Goto(function_return(), function_state()->drop_extra());
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = Pop();
HValue* receiver = environment()->Lookup(0);
HHasInstanceTypeAndBranch* typecheck =
new(zone()) HHasInstanceTypeAndBranch(return_value,
FIRST_SPEC_OBJECT_TYPE,
LAST_SPEC_OBJECT_TYPE);
HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
typecheck->SetSuccessorAt(0, if_spec_object);
typecheck->SetSuccessorAt(1, not_spec_object);
current_block()->Finish(typecheck);
if_spec_object->AddLeaveInlined(return_value,
function_return(),
function_state()->drop_extra());
not_spec_object->AddLeaveInlined(receiver,
function_return(),
function_state()->drop_extra());
}
} else {
// Return from an inlined function, visit the subexpression in the
// expression context of the call.
@ -2834,13 +2867,13 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
} else {
ASSERT(context->IsValue());
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* return_value = environment()->Pop();
HValue* return_value = Pop();
current_block()->AddLeaveInlined(return_value,
function_return(),
function_state()->drop_extra());
}
set_current_block(NULL);
}
set_current_block(NULL);
}
@ -3075,7 +3108,6 @@ void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
}
}
AddSimulate(osr_entry_id);
AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
HContext* context = new(zone()) HContext;
@ -3246,6 +3278,10 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
return Bailout("ForInStatement optimization is disabled");
}
if (!oracle()->IsForInFastCase(stmt)) {
return Bailout("ForInStatement is not fast case");
}
if (!stmt->each()->IsVariableProxy() ||
!stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
return Bailout("ForInStatement with non-local each variable");
@ -3256,10 +3292,8 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
CHECK_ALIVE(VisitForValue(stmt->enumerable()));
HValue* enumerable = Top(); // Leave enumerable at the top.
HValue* context = environment()->LookupContext();
HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
context, enumerable));
environment()->LookupContext(), enumerable));
AddSimulate(stmt->PrepareId());
HInstruction* array = AddInstruction(
@ -3336,9 +3370,11 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(body_exit);
HValue* current_index = Pop();
PushAndAdd(
new(zone()) HAdd(context, current_index, graph()->GetConstant1()));
HInstruction* new_index = new(zone()) HAdd(environment()->LookupContext(),
current_index,
graph()->GetConstant1());
new_index->AssumeRepresentation(Representation::Integer32());
PushAndAdd(new_index);
body_exit = current_block();
}
@ -4486,9 +4522,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
ASSERT(val != NULL);
switch (elements_kind) {
case EXTERNAL_PIXEL_ELEMENTS: {
HClampToUint8* clamp = new(zone()) HClampToUint8(val);
AddInstruction(clamp);
val = clamp;
val = AddInstruction(new(zone()) HClampToUint8(val));
break;
}
case EXTERNAL_BYTE_ELEMENTS:
@ -4497,9 +4531,13 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
HToInt32* floor_val = new(zone()) HToInt32(val);
AddInstruction(floor_val);
val = floor_val;
if (!val->representation().IsInteger32()) {
val = AddInstruction(new(zone()) HChange(
val,
Representation::Integer32(),
true, // Truncate to int32.
false)); // Don't deoptimize undefined (irrelevant here).
}
break;
}
case EXTERNAL_FLOAT_ELEMENTS:
@ -4516,6 +4554,7 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
return new(zone()) HStoreKeyedSpecializedArrayElement(
external_elements, checked_key, val, elements_kind);
} else {
ASSERT(val == NULL);
return new(zone()) HLoadKeyedSpecializedArrayElement(
external_elements, checked_key, elements_kind);
}
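
For the integer element kinds, the truncating HChange above amounts to an ECMAScript ToInt32-style conversion of the stored value. A rough standalone sketch of that conversion (the ToInt32 helper below is an illustrative approximation, not V8's DoubleToInt32):

#include <cassert>
#include <cmath>
#include <cstdint>

// ECMAScript-style ToInt32: NaN and infinities become 0, everything else is truncated,
// reduced modulo 2^32 and reinterpreted as a signed 32-bit value.
int32_t ToInt32(double d) {
  if (!std::isfinite(d)) return 0;
  double m = std::fmod(std::trunc(d), 4294967296.0);
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}

int main() {
  assert(ToInt32(3.7) == 3);
  assert(ToInt32(-1.0) == -1);
  assert(ToInt32(4294967296.0 + 5.0) == 5);  // wraps modulo 2^32
  assert(ToInt32(std::nan("")) == 0);        // undefined stores become 0, so no deopt is needed
  return 0;
}
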
@ -5002,7 +5041,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
PrintF("Trying to inline the polymorphic call to %s\n",
*name->ToCString());
}
if (FLAG_polymorphic_inlining && TryInline(expr)) {
if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
// Trying to inline will signal that we should bailout from the
// entire compilation by setting stack overflow on the visitor.
if (HasStackOverflow()) return;
@ -5072,19 +5111,18 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
}
bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
bool HGraphBuilder::TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
HValue* receiver,
int ast_id,
int return_id,
ReturnHandlingFlag return_handling) {
if (!FLAG_use_inlining) return false;
// The function call we are inlining is a method call if the call
// is a property call.
CallKind call_kind = (expr->expression()->AsProperty() == NULL)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
// Precondition: call is monomorphic and we have found a target with the
// appropriate arity.
Handle<JSFunction> caller = info()->closure();
Handle<JSFunction> target = expr->target();
Handle<SharedFunctionInfo> target_shared(target->shared());
// Do a quick check on source code length to avoid parsing large
@ -5132,7 +5170,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
if (!env->outer()->is_arguments_adaptor()) {
if (env->outer()->frame_type() == JS_FUNCTION) {
current_level++;
}
env = env->outer();
@ -5240,16 +5278,17 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
isolate());
// The function state is new-allocated because we need to delete it
// in two different places.
FunctionState* target_state =
new FunctionState(this, &target_info, &target_oracle, drop_extra);
FunctionState* target_state = new FunctionState(
this, &target_info, &target_oracle, return_handling);
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
expr->arguments()->length(),
arguments->length(),
function,
undefined,
call_kind);
call_kind,
function_state()->is_construct());
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
@ -5261,14 +5300,13 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
AddInstruction(context);
inner_env->BindContext(context);
#endif
HBasicBlock* body_entry = CreateBasicBlock(inner_env);
current_block()->Goto(body_entry);
body_entry->SetJoinId(expr->ReturnId());
set_current_block(body_entry);
AddSimulate(return_id);
current_block()->UpdateEnvironment(inner_env);
AddInstruction(new(zone()) HEnterInlined(target,
expr->arguments()->length(),
arguments->length(),
function,
call_kind));
call_kind,
function_state()->is_construct()));
VisitDeclarations(target_info.scope()->declarations());
VisitStatements(function->body());
if (HasStackOverflow()) {
@ -5287,32 +5325,27 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
TraceInline(target, caller, NULL);
if (current_block() != NULL) {
// Add a return of undefined if control can fall off the body. In a
// test context, undefined is false.
if (inlined_test_context() == NULL) {
// Add default return value (i.e. undefined for normal calls or the newly
// allocated receiver for construct calls) if control can fall off the
// body. In a test context, undefined is false and any JSObject is true.
if (call_context()->IsValue()) {
ASSERT(function_return() != NULL);
ASSERT(call_context()->IsEffect() || call_context()->IsValue());
if (call_context()->IsEffect()) {
current_block()->Goto(function_return(), drop_extra);
} else {
current_block()->AddLeaveInlined(undefined,
function_return(),
drop_extra);
}
HValue* return_value = function_state()->is_construct()
? receiver
: undefined;
current_block()->AddLeaveInlined(return_value,
function_return(),
function_state()->drop_extra());
} else if (call_context()->IsEffect()) {
ASSERT(function_return() != NULL);
current_block()->Goto(function_return(), function_state()->drop_extra());
} else {
// The graph builder assumes control can reach both branches of a
// test, so we materialize the undefined value and test it rather than
// simply jumping to the false target.
//
// TODO(3168478): refactor to avoid this.
ASSERT(call_context()->IsTest());
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
current_block()->Finish(test);
empty_true->Goto(inlined_test_context()->if_true(), drop_extra);
empty_false->Goto(inlined_test_context()->if_false(), drop_extra);
ASSERT(inlined_test_context() != NULL);
HBasicBlock* target = function_state()->is_construct()
? inlined_test_context()->if_true()
: inlined_test_context()->if_false();
current_block()->Goto(target, function_state()->drop_extra());
}
}
@ -5328,12 +5361,12 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
// Forward to the real test context.
if (if_true->HasPredecessor()) {
if_true->SetJoinId(expr->id());
if_true->SetJoinId(ast_id);
HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
if_true->Goto(true_target, function_state()->drop_extra());
}
if (if_false->HasPredecessor()) {
if_false->SetJoinId(expr->id());
if_false->SetJoinId(ast_id);
HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
if_false->Goto(false_target, function_state()->drop_extra());
}
@ -5341,7 +5374,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
return true;
} else if (function_return()->HasPredecessor()) {
function_return()->SetJoinId(expr->id());
function_return()->SetJoinId(ast_id);
set_current_block(function_return());
} else {
set_current_block(NULL);
@ -5351,6 +5384,34 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
}
bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
// The function call we are inlining is a method call if the call
// is a property call.
CallKind call_kind = (expr->expression()->AsProperty() == NULL)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
return TryInline(call_kind,
expr->target(),
expr->arguments(),
NULL,
expr->id(),
expr->ReturnId(),
drop_extra ? DROP_EXTRA_ON_RETURN : NORMAL_RETURN);
}
bool HGraphBuilder::TryInlineConstruct(CallNew* expr, HValue* receiver) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
expr->arguments(),
receiver,
expr->id(),
expr->ReturnId(),
CONSTRUCT_CALL_RETURN);
}
bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
@ -5361,6 +5422,7 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
case kMathLog:
case kMathSin:
case kMathCos:
case kMathTan:
if (expr->arguments()->length() == 1) {
HValue* argument = Pop();
HValue* context = environment()->LookupContext();
@ -5421,6 +5483,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathLog:
case kMathSin:
case kMathCos:
case kMathTan:
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* argument = Pop();
@ -5680,7 +5743,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
} else {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
if (TryInline(expr)) return;
if (TryInlineCall(expr)) return;
call = PreProcessCall(
new(zone()) HCallConstantFunction(expr->target(),
argument_count));
@ -5744,7 +5807,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
return;
}
if (TryInline(expr)) return;
if (TryInlineCall(expr)) return;
call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
argument_count));
} else {
@ -5780,7 +5843,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
}
if (TryInline(expr, true)) { // Drop function from environment.
if (TryInlineCall(expr, true)) { // Drop function from environment.
return;
} else {
call = PreProcessCall(new(zone()) HInvokeFunction(context,
@ -5810,25 +5873,72 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE;
}
void HGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
// The constructor function is also used as the receiver argument to the
// JS construct call builtin.
HValue* constructor = NULL;
CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
expr->RecordTypeFeedback(oracle());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* context = environment()->LookupContext();
// The constructor is both an operand to the instruction and an argument
// to the construct call.
int arg_count = expr->arguments()->length() + 1; // Plus constructor.
HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
Drop(arg_count);
return ast_context()->ReturnInstruction(call, expr->id());
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
// The constructor function is on the stack in the unoptimized code
// during evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<JSFunction> constructor = expr->target();
HValue* check = AddInstruction(
new(zone()) HCheckFunction(function, constructor));
// Force completion of inobject slack tracking before generating
// allocation code to finalize instance size.
if (constructor->shared()->IsInobjectSlackTrackingInProgress()) {
constructor->shared()->CompleteInobjectSlackTracking();
}
// Replace the constructor function with a newly allocated receiver.
HInstruction* receiver = new(zone()) HAllocateObject(context, constructor);
// Index of the receiver from the top of the expression stack.
const int receiver_index = argument_count - 1;
AddInstruction(receiver);
ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
environment()->SetExpressionStackAt(receiver_index, receiver);
if (TryInlineConstruct(expr, receiver)) return;
// TODO(mstarzinger): For now we remove the previous HAllocateObject and
// add HPushArgument for the arguments in case inlining failed. What we
// actually should do is emit HInvokeFunction on the constructor instead
// of using HCallNew as a fallback.
receiver->DeleteAndReplaceWith(NULL);
check->DeleteAndReplaceWith(NULL);
environment()->SetExpressionStackAt(receiver_index, function);
HInstruction* call = PreProcessCall(
new(zone()) HCallNew(context, function, argument_count));
call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
} else {
// The constructor function is both an operand to the instruction and an
// argument to the construct call.
HValue* constructor = NULL;
CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
HInstruction* call =
new(zone()) HCallNew(context, constructor, argument_count);
Drop(argument_count);
call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@ -6544,15 +6654,6 @@ static bool IsLiteralCompareNil(HValue* left,
}
static bool IsLiteralCompareBool(HValue* left,
Token::Value op,
HValue* right) {
return op == Token::EQ_STRICT &&
((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
(right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
}
void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@ -6600,12 +6701,6 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
}
if (IsLiteralCompareBool(left, op, right)) {
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
}
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not
@ -6733,20 +6828,16 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
UNREACHABLE();
}
void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
int length = declarations->length();
int global_count = 0;
for (int i = 0; i < declarations->length(); i++) {
VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration();
if (decl == NULL) continue;
HandleVariableDeclaration(decl->proxy(),
decl->mode(),
decl->fun(),
&global_count);
Declaration* decl = declarations->at(i);
FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
HandleDeclaration(decl->proxy(),
decl->mode(),
fun_decl != NULL ? fun_decl->fun() : NULL,
&global_count);
}
// Batch declare global functions and variables.
@ -6754,13 +6845,13 @@ void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
for (int j = 0, i = 0; i < length; i++) {
VariableDeclaration* decl = declarations->at(i)->AsVariableDeclaration();
if (decl == NULL) continue;
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
if (var->IsUnallocated()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
if (fun_decl == NULL) {
if (var->binding_needs_init()) {
// In case this binding needs initialization use the hole.
array->set_the_hole(j++);
@ -6769,7 +6860,7 @@ void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
}
} else {
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(decl->fun(), info()->script());
Compiler::BuildFunctionInfo(fun_decl->fun(), info()->script());
// Check for stack-overflow exception.
if (function.is_null()) {
SetStackOverflow();
@ -6791,10 +6882,10 @@ void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
}
void HGraphBuilder::HandleVariableDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function,
int* global_count) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function,
int* global_count) {
Variable* var = proxy->var();
bool binding_needs_init =
(mode == CONST || mode == CONST_HARMONY || mode == LET);
@ -6830,8 +6921,28 @@ void HGraphBuilder::HandleVariableDeclaration(VariableProxy* proxy,
}
void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
UNREACHABLE();
}
void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
UNREACHABLE();
}
void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
// TODO(rossberg)
UNREACHABLE();
}
void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
UNREACHABLE();
}
void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
UNREACHABLE();
}
@ -6953,10 +7064,11 @@ void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating graph for inlined function. Currently
// constructor inlining is not supported and we can just return
// false from %_IsConstructCall().
return ast_context()->ReturnValue(graph()->GetConstantFalse());
// We are generating graph for inlined function.
HValue* value = function_state()->is_construct()
? graph()->GetConstantTrue()
: graph()->GetConstantFalse();
return ast_context()->ReturnValue(value);
} else {
return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
call->id());
@ -7010,6 +7122,17 @@ void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
}
void HGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* date = Pop();
HDateField* result = new(zone()) HDateField(date, index);
return ast_context()->ReturnInstruction(result, call->id());
}
void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@ -7344,14 +7467,14 @@ HEnvironment::HEnvironment(HEnvironment* outer,
: closure_(closure),
values_(0),
assigned_variables_(4),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
local_count_(0),
outer_(outer),
pop_count_(0),
push_count_(0),
ast_id_(AstNode::kNoNumber),
arguments_adaptor_(false) {
ast_id_(AstNode::kNoNumber) {
Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
}
@ -7359,31 +7482,32 @@ HEnvironment::HEnvironment(HEnvironment* outer,
HEnvironment::HEnvironment(const HEnvironment* other)
: values_(0),
assigned_variables_(0),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
local_count_(0),
outer_(NULL),
pop_count_(0),
push_count_(0),
ast_id_(other->ast_id()),
arguments_adaptor_(false) {
ast_id_(other->ast_id()) {
Initialize(other);
}
HEnvironment::HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
int arguments)
: closure_(closure),
values_(arguments),
assigned_variables_(0),
frame_type_(frame_type),
parameter_count_(arguments),
local_count_(0),
outer_(outer),
pop_count_(0),
push_count_(0),
ast_id_(AstNode::kNoNumber),
arguments_adaptor_(true) {
ast_id_(AstNode::kNoNumber) {
}
@ -7404,13 +7528,13 @@ void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
values_.AddAll(other->values_);
assigned_variables_.AddAll(other->assigned_variables_);
frame_type_ = other->frame_type_;
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
ast_id_ = other->ast_id_;
arguments_adaptor_ = other->arguments_adaptor_;
}
@ -7511,13 +7635,28 @@ HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
}
HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
Handle<JSFunction> target,
FrameType frame_type,
int arguments) const {
HEnvironment* new_env = new(closure()->GetIsolate()->zone())
HEnvironment(outer, target, frame_type, arguments + 1);
for (int i = 0; i <= arguments; ++i) { // Include receiver.
new_env->Push(ExpressionStackAt(arguments - i));
}
new_env->ClearHistory();
return new_env;
}
HEnvironment* HEnvironment::CopyForInlining(
Handle<JSFunction> target,
int arguments,
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind) const {
ASSERT(!is_arguments_adaptor());
CallKind call_kind,
bool is_construct) const {
ASSERT(frame_type() == JS_FUNCTION);
Zone* zone = closure()->GetIsolate()->zone();
@ -7528,13 +7667,16 @@ HEnvironment* HEnvironment::CopyForInlining(
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
if (is_construct) {
// Create artificial constructor stub environment. The receiver should
// actually be the constructor function, but we pass the newly allocated
// object instead, DoComputeConstructStubFrame() relies on that.
outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
}
if (arity != arguments) {
// Create artificial arguments adaptation environment.
outer = new(zone) HEnvironment(outer, target, arguments + 1);
for (int i = 0; i <= arguments; ++i) { // Include receiver.
outer->Push(ExpressionStackAt(arguments - i));
}
outer->ClearHistory();
outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
}
HEnvironment* inner =
@ -7549,7 +7691,7 @@ HEnvironment* HEnvironment::CopyForInlining(
// builtin function, pass undefined as the receiver for function
// calls (instead of the global receiver).
if ((target->shared()->native() || !function->is_classic_mode()) &&
call_kind == CALL_AS_FUNCTION) {
call_kind == CALL_AS_FUNCTION && !is_construct) {
inner->SetValueAt(0, undefined);
}
inner->SetValueAt(arity + 1, LookupContext());
@ -7892,7 +8034,10 @@ void HPhase::End() const {
HStatistics::Instance()->SaveTiming(name_, end - start_, size);
}
if (FLAG_trace_hydrogen) {
// Produce trace output if flag is set so that the first letter of the
// phase name matches the command line parameter FLAG_trace_phase.
if (FLAG_trace_hydrogen &&
OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
if (allocator_ != NULL) {

deps/v8/src/hydrogen.h (80 changed lines)

@ -361,19 +361,19 @@ class HGraph: public ZoneObject {
Zone* HBasicBlock::zone() { return graph_->zone(); }
// Type of stack frame an environment might refer to.
enum FrameType { JS_FUNCTION, JS_CONSTRUCT, ARGUMENTS_ADAPTOR };
class HEnvironment: public ZoneObject {
public:
HEnvironment(HEnvironment* outer,
Scope* scope,
Handle<JSFunction> closure);
bool is_arguments_adaptor() const {
return arguments_adaptor_;
}
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_->is_arguments_adaptor() ?
outer_->outer_ : outer_;
HEnvironment* outer = outer_;
while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
if (drop_extra) outer->Drop(1);
return outer;
}
@ -384,6 +384,7 @@ class HEnvironment: public ZoneObject {
const ZoneList<int>* assigned_variables() const {
return &assigned_variables_;
}
FrameType frame_type() const { return frame_type_; }
int parameter_count() const { return parameter_count_; }
int specials_count() const { return specials_count_; }
int local_count() const { return local_count_; }
@ -469,7 +470,8 @@ class HEnvironment: public ZoneObject {
int arguments,
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind) const;
CallKind call_kind,
bool is_construct) const;
void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
@ -490,9 +492,17 @@ class HEnvironment: public ZoneObject {
private:
explicit HEnvironment(const HEnvironment* other);
// Create an argument adaptor environment.
HEnvironment(HEnvironment* outer, Handle<JSFunction> closure, int arguments);
HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
FrameType frame_type,
int arguments);
// Create an artificial stub environment (e.g. for argument adaptor or
// constructor stub).
HEnvironment* CreateStubEnvironment(HEnvironment* outer,
Handle<JSFunction> target,
FrameType frame_type,
int arguments) const;
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
@ -515,6 +525,7 @@ class HEnvironment: public ZoneObject {
// Value array [parameters] [specials] [locals] [temporaries].
ZoneList<HValue*> values_;
ZoneList<int> assigned_variables_;
FrameType frame_type_;
int parameter_count_;
int specials_count_;
int local_count_;
@ -522,7 +533,6 @@ class HEnvironment: public ZoneObject {
int pop_count_;
int push_count_;
int ast_id_;
bool arguments_adaptor_;
};
@ -650,18 +660,26 @@ class TestContext: public AstContext {
};
enum ReturnHandlingFlag {
NORMAL_RETURN,
DROP_EXTRA_ON_RETURN,
CONSTRUCT_CALL_RETURN
};
class FunctionState {
public:
FunctionState(HGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
bool drop_extra);
ReturnHandlingFlag return_handling);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
bool drop_extra() { return drop_extra_; }
bool drop_extra() { return return_handling_ == DROP_EXTRA_ON_RETURN; }
bool is_construct() { return return_handling_ == CONSTRUCT_CALL_RETURN; }
HBasicBlock* function_return() { return function_return_; }
TestContext* test_context() { return test_context_; }
void ClearInlinedTestContext() {
@ -681,11 +699,13 @@ class FunctionState {
// inlined. NULL when not inlining.
AstContext* call_context_;
// Indicate if we have to drop an extra value from the environment on
// return from inlined functions.
bool drop_extra_;
// Indicate whether we have to perform special handling on return from
// inlined functions.
// - DROP_EXTRA_ON_RETURN: Drop an extra value from the environment.
// - CONSTRUCT_CALL_RETURN: Either use allocated receiver or return value.
ReturnHandlingFlag return_handling_;
// When inlining in an effect of value context, this is the return block.
// When inlining in an effect or value context, this is the return block.
// It is NULL otherwise. When inlining in a test context, there are a
// pair of return blocks in the context. When not inlining, there is no
// local return point.
@ -825,7 +845,6 @@ class HGraphBuilder: public AstVisitor {
CompilationInfo* info() const {
return function_state()->compilation_info();
}
AstContext* call_context() const {
return function_state()->call_context();
}
@ -851,10 +870,10 @@ class HGraphBuilder: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
#undef INLINE_FUNCTION_GENERATOR_DECLARATION
void HandleVariableDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function,
int* global_count);
void HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function,
int* global_count);
void VisitDelete(UnaryOperation* expr);
void VisitVoid(UnaryOperation* expr);
@ -922,7 +941,7 @@ class HGraphBuilder: public AstVisitor {
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
template <int V> HInstruction* PreProcessCall(HCall<V>* call);
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
void TraceRepresentation(Token::Value op,
TypeInfo info,
@ -954,11 +973,20 @@ class HGraphBuilder: public AstVisitor {
// Try to optimize fun.apply(receiver, arguments) pattern.
bool TryCallApply(Call* expr);
bool TryInline(Call* expr, bool drop_extra = false);
bool TryInline(CallKind call_kind,
Handle<JSFunction> target,
ZoneList<Expression*>* arguments,
HValue* receiver,
int ast_id,
int return_id,
ReturnHandlingFlag return_handling);
bool TryInlineCall(Call* expr, bool drop_extra = false);
bool TryInlineConstruct(CallNew* expr, HValue* receiver);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
CheckType check_type);
HValue* receiver,
Handle<Map> receiver_map,
CheckType check_type);
bool TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra);
// If --trace-inlining, print a line of the inlining trace. Inlining

deps/v8/src/ia32/builtins-ia32.cc (7 changed lines)

@ -324,6 +324,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
// Store offset of return address for deoptimizer.
if (!is_api_function && !count_constructions) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@ -1639,7 +1644,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(edx);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);

deps/v8/src/ia32/code-stubs-ia32.cc (135 changed lines)

@ -2510,7 +2510,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
GenerateOperation(masm);
GenerateOperation(masm, type_);
__ mov(Operand(ecx, 0), ebx);
__ mov(Operand(ecx, kIntSize), edx);
__ mov(Operand(ecx, 2 * kIntSize), eax);
@ -2526,7 +2526,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm);
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
__ movdbl(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
@ -2578,14 +2578,15 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}
void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
void TranscendentalCacheStub::GenerateOperation(
MacroAssembler* masm, TranscendentalCache::Type type) {
// Only free register is edi.
// Input value is on FP stack, and also in ebx/edx.
// Input value is possibly in xmm1.
// Address of result (a newly allocated HeapNumber) may be in eax.
if (type_ == TranscendentalCache::SIN ||
type_ == TranscendentalCache::COS ||
type_ == TranscendentalCache::TAN) {
if (type == TranscendentalCache::SIN ||
type == TranscendentalCache::COS ||
type == TranscendentalCache::TAN) {
// Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation.
@ -2649,7 +2650,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// FPU Stack: input % 2*pi
__ bind(&in_range);
switch (type_) {
switch (type) {
case TranscendentalCache::SIN:
__ fsin();
break;
@ -2667,7 +2668,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
}
__ bind(&done);
} else {
ASSERT(type_ == TranscendentalCache::LOG);
ASSERT(type == TranscendentalCache::LOG);
__ fldln2();
__ fxch();
__ fyl2x();
@ -6154,7 +6155,6 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// ebx: instance type
// Calculate length of sub string using the smi values.
Label result_longer_than_two;
__ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
__ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
@ -6167,43 +6167,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&not_original_string);
// Special handling of sub-strings of length 1 and 2. One character strings
// are handled in the runtime system (looked up in the single character
// cache). Two character strings are looked for in the symbol cache.
__ cmp(ecx, Immediate(Smi::FromInt(2)));
__ j(greater, &result_longer_than_two);
__ j(less, &runtime);
// Sub string of length 2 requested.
// eax: string
// ebx: instance type
// ecx: sub string length (smi, value is 2)
// edx: from index (smi)
__ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
// Get the two characters forming the sub string.
__ SmiUntag(edx); // From index is no longer smi.
__ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
__ movzx_b(ecx,
FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
// Try to lookup two character string in symbol table.
Label combine_two_char, save_two_char;
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi, &combine_two_char, &save_two_char);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&combine_two_char);
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
__ bind(&save_two_char);
__ AllocateAsciiString(eax, 2, ecx, edx, &runtime);
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&result_longer_than_two);
// eax: string
// ebx: instance type
// ecx: sub string length (smi)
@ -6270,11 +6234,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
__ bind(&set_slice_header);
__ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
__ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
__ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
@ -6498,7 +6462,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, chars_not_equal_near);
__ add(index, Immediate(1));
__ inc(index);
__ j(not_zero, &loop);
}
@ -6572,16 +6536,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
__ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
__ j(not_equal, &maybe_undefined1, Label::kNear);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
__ j(not_equal, &maybe_undefined2, Label::kNear);
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SSE2 or CMOV is unsupported.
@ -6607,14 +6571,28 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
__ bind(&unordered);
}
__ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
}
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(equal, &unordered);
}
__ bind(&miss);
GenerateMiss(masm);
}
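The new maybe_undefined paths above lean on JavaScript's rule that ordered relational comparisons treat undefined as NaN, and every ordered comparison involving NaN is false. A standalone C++ illustration of that invariant (not v8 code; quiet_NaN() stands in for the converted undefined value):
#include <cassert>
#include <limits>
int main() {
  double undefined_as_nan = std::numeric_limits<double>::quiet_NaN();
  double x = 1.0;
  // Every ordered comparison against NaN is false, so jumping to the
  // unordered label yields the same answer the generic stub would compute.
  assert(!(x < undefined_as_nan));
  assert(!(x > undefined_as_nan));
  assert(!(x <= undefined_as_nan));
  assert(!(undefined_as_nan >= x));
  return 0;
}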
@ -6667,9 +6645,10 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::STRINGS);
ASSERT(GetCondition() == equal);
Label miss;
bool equality = Token::IsEqualityOp(op_);
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
@ -6708,25 +6687,33 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ bind(&not_same);
// Check that both strings are symbols. If they are, we're done
// because we already know they are not identical.
Label do_compare;
STATIC_ASSERT(kSymbolTag != 0);
__ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
__ ret(0);
// because we already know they are not identical. But in the case of
// non-equality compare, we still need to determine the order.
if (equality) {
Label do_compare;
STATIC_ASSERT(kSymbolTag != 0);
__ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
__ ret(0);
__ bind(&do_compare);
}
// Check that both strings are sequential ASCII.
Label runtime;
__ bind(&do_compare);
__ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
// Compare flat ASCII strings. Returns when done.
StringCompareStub::GenerateFlatAsciiStringEquals(
masm, left, right, tmp1, tmp2);
if (equality) {
StringCompareStub::GenerateFlatAsciiStringEquals(
masm, left, right, tmp1, tmp2);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(
masm, left, right, tmp1, tmp2, tmp3);
}
// Handle more complex cases in runtime.
__ bind(&runtime);
@ -6734,7 +6721,11 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
__ push(left);
__ push(right);
__ push(tmp1);
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
__ bind(&miss);
GenerateMiss(masm);
@ -6823,7 +6814,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
// (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
Register index = r0;
@ -6849,11 +6840,17 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ cmp(entity_name, Handle<String>(name));
__ j(equal, miss);
Label the_hole;
// Check for the hole and skip.
__ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
__ j(equal, &the_hole, Label::kNear);
// Check if the entry name is not a symbol.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
kIsSymbolMask);
__ j(zero, miss);
__ bind(&the_hole);
}
StringDictionaryLookupStub stub(properties,

3
deps/v8/src/ia32/code-stubs-ia32.h

@ -49,6 +49,8 @@ class TranscendentalCacheStub: public CodeStub {
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
static void GenerateOperation(MacroAssembler* masm,
TranscendentalCache::Type type);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
@ -56,7 +58,6 @@ class TranscendentalCacheStub: public CodeStub {
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm);
};

48
deps/v8/src/ia32/codegen-ia32.cc

@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen.h"
#include "heap.h"
#include "macro-assembler.h"
namespace v8 {
@ -55,6 +56,53 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ masm.
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type) {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
if (buffer == NULL) {
// Fallback to library function if function cannot be created.
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
case TranscendentalCache::TAN: return &tan;
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
}
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
// Move double input into registers.
__ push(ebx);
__ push(edx);
__ push(edi);
__ fld_d(Operand(esp, 4 * kPointerSize));
__ mov(ebx, Operand(esp, 4 * kPointerSize));
__ mov(edx, Operand(esp, 5 * kPointerSize));
TranscendentalCacheStub::GenerateOperation(&masm, type);
// The return value is expected to be on ST(0) of the FPU stack.
__ pop(edi);
__ pop(edx);
__ pop(ebx);
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<TranscendentalFunction>(buffer);
}
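CreateTranscendentalFunction hands back a raw function pointer to the freshly generated (and now read-only, executable) buffer, or to the corresponding libm routine if the allocation failed. A hedged usage sketch, assuming TranscendentalFunction is the plain double (*)(double) typedef from codegen.h:
TranscendentalFunction fast_log =
    CreateTranscendentalFunction(TranscendentalCache::LOG);
double y = fast_log(2.0);  // natural log, via the generated stub or log()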
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}

139
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -206,7 +206,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
static const byte kJnsOffset = 0x13;
static const byte kJaeInstruction = 0x73;
static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
@ -219,8 +219,8 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
ASSERT(check_code->entry() ==
Assembler::target_address_at(call_target_address));
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp esp, <limit>
@ -239,13 +239,13 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
// ok:
if (FLAG_count_based_interrupts) {
ASSERT(*(call_target_address - 3) == kJnsInstruction);
ASSERT(*(call_target_address - 2) == kJnsOffset);
ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
} else {
ASSERT(*(call_target_address - 3) == kJaeInstruction);
ASSERT(*(call_target_address - 2) == kJaeOffset);
ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
}
ASSERT(*(call_target_address - 1) == kCallInstruction);
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@ -261,14 +261,14 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
ASSERT(replacement_code->entry() ==
Assembler::target_address_at(call_target_address));
ASSERT_EQ(replacement_code->entry(),
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT(*(call_target_address - 3) == kNopByteOne &&
*(call_target_address - 2) == kNopByteTwo &&
*(call_target_address - 1) == kCallInstruction);
ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
@ -467,7 +467,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@ -489,16 +488,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
@ -508,7 +504,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
@ -520,7 +515,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// A marker value is used in place of the context.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
@ -531,7 +525,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@ -541,7 +534,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Number of incoming arguments.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@ -561,6 +553,110 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = 6 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// Construct stub can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
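Reading the writes above top-down gives the layout of the synthesized construct-stub frame; the six fixed slots below the translated parameters are what fixed_frame_size = 6 * kPointerSize accounts for. A sketch of the layout derived from this function (not from a v8 header), highest address first:
//   [ translated parameters       ]   height slots, parameter 0 is the receiver
//   [ caller's pc                 ]
//   [ caller's fp                 ]   <- this frame's fp
//   [ context                     ]   copied from the previous frame
//   [ Smi(StackFrame::CONSTRUCT)  ]   marker used in place of the function
//   [ Smi(height - 1)             ]   argc
//   [ allocated receiver          ]   re-pushed copy of parameter 0
//
// The frame's pc is the construct_stub_deopt_pc_offset() recorded by
// SetConstructStubDeoptPCOffset in builtins-ia32.cc above, pointing into
// Builtins::kJSConstructStubGeneric.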
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
int node_id = iterator->Next();
@ -672,6 +768,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = reinterpret_cast<uint32_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(esi.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",

158
deps/v8/src/ia32/full-codegen-ia32.cc

@ -127,28 +127,6 @@ void FullCodeGenerator::Generate() {
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
// We can optionally optimize based on counters rather than statistical
// sampling.
if (info->ShouldSelfOptimize()) {
if (FLAG_trace_opt_verbose) {
PrintF("[adding self-optimization header to %s]\n",
*info->function()->debug_name()->ToCString());
}
has_self_optimization_header_ = true;
MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
JSGlobalPropertyCell* cell;
if (maybe_cell->To(&cell)) {
__ sub(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
Immediate(Smi::FromInt(1)));
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
STATIC_ASSERT(kSmiTag == 0);
__ j(zero, compile_stub);
ASSERT(masm_->pc_offset() == self_optimization_header_size());
}
}
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@ -330,6 +308,25 @@ void FullCodeGenerator::ClearAccumulator() {
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(ebx, Immediate(profiling_counter_));
__ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(delta)));
}
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
// Self-optimization is a one-off thing: if it fails, don't try again.
reset_value = Smi::kMaxValue;
}
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(reset_value)));
}
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
@ -342,15 +339,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
weight = Min(127, Max(1, distance / 100));
}
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(weight)));
} else {
// This version is slightly faster, but not snapshot safe.
__ sub(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(weight)));
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
InterruptStub stub;
__ CallStub(&stub);
@ -379,15 +368,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
if (FLAG_count_based_interrupts) {
// Reset the countdown.
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
} else {
__ mov(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
}
EmitProfilingCounterReset();
}
__ bind(&ok);
@ -410,37 +391,28 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (FLAG_interrupt_at_exit) {
if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
if (FLAG_weighted_back_edges) {
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else if (FLAG_weighted_back_edges) {
int distance = masm_->pc_offset();
weight = Min(127, Max(1, distance / 100));
}
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(weight)));
} else {
// This version is slightly faster, but not snapshot safe.
__ sub(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(weight)));
}
EmitProfilingCounterDecrement(weight);
Label ok;
__ j(positive, &ok, Label::kNear);
__ push(eax);
InterruptStub stub;
__ CallStub(&stub);
__ pop(eax);
// Reset the countdown.
if (Serializer::enabled()) {
__ mov(ebx, Immediate(profiling_counter_));
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
__ mov(Operand::Cell(profiling_counter_),
Immediate(Smi::FromInt(FLAG_interrupt_budget)));
InterruptStub stub;
__ CallStub(&stub);
}
__ pop(eax);
EmitProfilingCounterReset();
__ bind(&ok);
}
#ifdef DEBUG
@ -1033,6 +1005,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register eax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
RecordTypeFeedbackCell(stmt->PrepareId(), cell);
__ LoadHeapObject(ebx, cell);
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ mov(ebx, Immediate(Smi::FromInt(1))); // Smi indicates slow check
__ mov(ecx, Operand(esp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@ -1509,10 +1491,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(key);
__ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
VisitForStackValue(value);
if (property->kind() == ObjectLiteral::Property::GETTER) {
VisitForStackValue(value);
__ push(Immediate(isolate()->factory()->null_value()));
} else {
__ push(Immediate(isolate()->factory()->null_value()));
VisitForStackValue(value);
}
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
break;
@ -2400,6 +2385,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(flags);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
}
@ -2947,6 +2933,48 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
ASSERT_NE(NULL, args->at(1)->AsLiteral());
Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label runtime, done;
Register object = eax;
Register result = eax;
Register scratch = ecx;
#ifdef DEBUG
__ AbortIfSmi(object);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
__ Assert(equal, "Trying to get date field from non-date.");
#endif
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
__ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
context()->Plug(result);
}
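The inline date-field sequence above has three cases: field 0 (the primitive time value) is always read directly, fields below JSDate::kFirstUncachedField are read from the object's cache as long as its stamp matches the isolate-wide date_cache_stamp, and everything else falls back to the get_date_field C function. A minimal C++ sketch of that decision; SimpleDate, g_date_cache_stamp and GetDateFieldSlow are illustrative stand-ins, not v8 types:
#include <stdint.h>
static const int kFirstUncachedField = 4;   // illustrative, not the v8 constant
static int64_t g_date_cache_stamp = 1;      // bumped when cached fields expire
struct SimpleDate {
  double value;                          // field 0: the primitive time value
  int64_t cache_stamp;                   // stamp when the cached fields were filled
  double cached[kFirstUncachedField];    // year, month, day, ... (cached fields)
};
double GetDateFieldSlow(SimpleDate* date, int index);  // recompute, refill cache
double ReadDateField(SimpleDate* date, int index) {
  if (index == 0) return date->value;                      // always valid
  if (index < kFirstUncachedField &&
      date->cache_stamp == g_date_cache_stamp) {
    return date->cached[index];                            // fast path: cached load
  }
  return GetDateFieldSlow(date, index);                    // slow path: C call
}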
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();

208
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -67,7 +67,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
HPhase phase("Code generation", chunk());
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(SSE2);
@ -394,10 +394,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
if (environment->is_arguments_adaptor()) {
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
} else {
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
default:
UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@ -550,7 +558,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (!e->is_arguments_adaptor()) {
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
@ -1265,6 +1273,7 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
Label done;
// If the object is a smi return the object.
__ JumpIfSmi(input, &done, Label::kNear);
@ -1278,6 +1287,43 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
}
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
Smi* index = instr->index();
Label runtime, done;
ASSERT(object.is(result));
ASSERT(object.is(eax));
#ifdef DEBUG
__ AbortIfSmi(object);
__ CmpObjectType(object, JS_DATE_TYPE, scratch);
__ Assert(equal, "Trying to get date field from non-date.");
#endif
if (index->value() == 0) {
__ mov(result, FieldOperand(object, JSDate::kValueOffset));
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch, Operand::StaticVariable(stamp));
__ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ mov(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch);
__ mov(Operand(esp, 0), object);
__ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
}
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
@ -3042,16 +3088,64 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LRandom* instr_;
};
DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
// Assert that the register size is indeed the size of each seed.
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
__ PrepareCallCFunction(1, ebx);
__ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
__ mov(Operand(esp, 0), eax);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
// ebx: FixedArray of the global context's random seeds
// Load state[0].
__ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
// If state[0] == 0, call runtime to initialize seeds.
__ test(ecx, ecx);
__ j(zero, deferred->entry());
// Load state[1].
__ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
// ecx: state[0]
// eax: state[1]
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
__ movzx_w(edx, ecx);
__ imul(edx, edx, 18273);
__ shr(ecx, 16);
__ add(ecx, edx);
// Save state[0].
__ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
__ movzx_w(edx, eax);
__ imul(edx, edx, 36969);
__ shr(eax, 16);
__ add(eax, edx);
// Save state[1].
__ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
__ shl(ecx, 14);
__ and_(eax, Immediate(0x3FFFF));
__ add(eax, ecx);
__ bind(deferred->exit());
// Convert 32 random bits in eax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
@ -3064,6 +3158,14 @@ void LCodeGen::DoRandom(LRandom* instr) {
}
void LCodeGen::DoDeferredRandom(LRandom* instr) {
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0), eax);
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Return value is in eax.
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
@ -4194,6 +4296,94 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
class DeferredAllocateObject: public LDeferredCode {
public:
DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocateObject* instr_;
};
DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
ASSERT(initial_map->pre_allocated_property_fields() +
initial_map->unused_property_fields() -
initial_map->inobject_properties() == 0);
// Allocate memory for the object. The initial map might change when
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
__ AllocateInNewSpace(instance_size,
result,
no_reg,
scratch,
deferred->entry(),
TAG_OBJECT);
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(scratch, constructor);
__ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
if (FLAG_debug_code) {
__ AbortIfSmi(map);
__ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
instance_size >> kPointerSizeLog2);
__ Assert(equal, "Unexpected instance size");
__ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
initial_map->pre_allocated_property_fields());
__ Assert(equal, "Unexpected pre-allocated property fields count");
__ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
initial_map->unused_property_fields());
__ Assert(equal, "Unexpected unused property fields count");
__ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
initial_map->inobject_properties());
__ Assert(equal, "Unexpected in-object property fields count");
}
// Initialize map and fields of the newly allocated object.
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
__ mov(FieldOperand(result, JSObject::kMapOffset), map);
__ mov(scratch, factory()->empty_fixed_array());
__ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
__ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
if (initial_map->inobject_properties() != 0) {
__ mov(scratch, factory()->undefined_value());
for (int i = 0; i < initial_map->inobject_properties(); i++) {
int property_offset = JSObject::kHeaderSize + i * kPointerSize;
__ mov(FieldOperand(result, property_offset), scratch);
}
}
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, Immediate(0));
PushSafepointRegistersScope scope(this);
__ PushHeapObject(constructor);
CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, eax);
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Heap* heap = isolate()->heap();

2
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -105,8 +105,10 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);

60
deps/v8/src/ia32/lithium-ia32.cc

@ -388,7 +388,7 @@ LOperand* LChunk::GetNextSpillSlot(bool is_double) {
void LChunk::MarkEmptyBlocks() {
HPhase phase("Mark empty blocks", this);
HPhase phase("L_Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
@ -551,7 +551,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new(zone()) LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
HPhase phase("L_Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@ -1005,11 +1005,12 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
ASSERT(ast_id != AstNode::kNoNumber ||
hydrogen_env->frame_type() != JS_FUNCTION);
int value_count = hydrogen_env->length();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
hydrogen_env->is_arguments_adaptor(),
hydrogen_env->frame_type(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
@ -1031,7 +1032,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
result->AddValue(op, value->representation());
}
if (!hydrogen_env->is_arguments_adaptor()) {
if (hydrogen_env->frame_type() == JS_FUNCTION) {
*argument_index_accumulator = argument_index;
}
@ -1194,7 +1195,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
return DefineSameAsFirst(result);
} else if (op == kMathSin || op == kMathCos) {
} else if (op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
@ -1646,6 +1647,14 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
}
LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
LOperand* date = UseFixed(instr->value(), eax);
LDateField* result =
new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@ -1815,34 +1824,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
HValue* value = instr->value();
Representation input_rep = value->representation();
LInstruction* result;
if (input_rep.IsDouble()) {
LOperand* reg = UseRegister(value);
LOperand* temp_reg =
CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
result = DefineAsRegister(new(zone()) LDoubleToI(reg, temp_reg));
} else if (input_rep.IsInteger32()) {
// Canonicalization should already have removed the hydrogen instruction in
// this case, since it is a noop.
UNREACHABLE();
return NULL;
} else {
ASSERT(input_rep.IsTagged());
LOperand* reg = UseRegister(value);
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve xmm1 explicitly.
LOperand* xmm_temp =
CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
result = DefineSameAsFirst(new(zone()) LTaggedToI(reg, xmm_temp));
}
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new(zone()) LReturn(UseFixed(instr->value(), eax));
}
@ -2213,6 +2194,14 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* temp = TempRegister();
LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
@ -2385,7 +2374,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
instr->call_kind(),
instr->is_construct());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;

42
deps/v8/src/ia32/lithium-ia32.h

@ -43,6 +43,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@ -171,7 +172,8 @@ class LCodeGen;
V(ForInPrepareMap) \
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex)
V(LoadFieldByIndex) \
V(DateField)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@ -1001,6 +1003,24 @@ class LValueOf: public LTemplateInstruction<1, 1, 1> {
};
class LDateField: public LTemplateInstruction<1, 1, 1> {
public:
LDateField(LOperand* date, LOperand* temp, Smi* index)
: index_(index) {
inputs_[0] = date;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
DECLARE_HYDROGEN_ACCESSOR(DateField)
Smi* index() const { return index_; }
private:
Smi* index_;
};
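By v8's lithium convention the LTemplateInstruction<1, 1, 1> parameters are the number of results, value inputs, and temps, so LDateField produces one value, takes the date as its single operand and reserves one scratch register, with the Smi index carried as immediate data. A hypothetical instruction following the same shape (illustration only; it is not registered in LITHIUM_CONCRETE_INSTRUCTION_LIST):
class LHypotheticalOp: public LTemplateInstruction<1, 1, 1> {
 public:
  LHypotheticalOp(LOperand* value, LOperand* temp) {
    inputs_[0] = value;   // the single value operand
    temps_[0] = temp;     // scratch register reserved for code generation
  }
  DECLARE_CONCRETE_INSTRUCTION(HypotheticalOp, "hypothetical-op")
};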
class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
@ -1995,6 +2015,20 @@ class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
};
class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
public:
LAllocateObject(LOperand* context, LOperand* temp) {
inputs_[0] = context;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
LOperand* context() { return inputs_[0]; }
};
class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFastLiteral(LOperand* context) {
@ -2307,7 +2341,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
isolate_(graph->isolate()),
zone_(graph->isolate()->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@ -2337,7 +2371,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() { return isolate_->zone(); }
Zone* zone() { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@ -2446,7 +2480,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Isolate* isolate_;
Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;

2
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -1085,7 +1085,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
MaybeObject* result = Execution::HandleStackGuardInterrupt();
MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid
int delta = code_handle->address() - re_code->address();

96
deps/v8/src/ia32/stub-cache-ia32.cc

@ -44,19 +44,30 @@ static void ProbeTable(Isolate* isolate,
Code::Flags flags,
StubCache::Table table,
Register name,
Register receiver,
// Number of the cache entry pointer-size scaled.
Register offset,
Register extra) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
if (extra.is_valid()) {
// Get the code entry from the cache.
__ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
__ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Check that the flags match what we're looking for.
@ -65,6 +76,14 @@ static void ProbeTable(Isolate* isolate,
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(extra);
@ -75,11 +94,19 @@ static void ProbeTable(Isolate* isolate,
__ push(offset);
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
__ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
__ j(not_equal, &miss);
// Check the map matches.
__ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
__ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Restore offset register.
__ mov(offset, Operand(esp, 0));
// Get the code entry from the cache.
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
@ -87,9 +114,17 @@ static void ProbeTable(Isolate* isolate,
__ cmp(offset, flags);
__ j(not_equal, &miss);
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Restore offset and re-load code entry from cache.
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
// Jump to the first instruction in the code stub.
__ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
@ -159,12 +194,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
Register extra2) {
Register extra2,
Register extra3) {
Label miss;
// Assert that code is valid. The shifting code relies on the entry size
// being 8.
ASSERT(sizeof(Entry) == 8);
// Assert that code is valid. The multiplying code relies on the entry size
// being 12.
ASSERT(sizeof(Entry) == 12);
// Assert the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@ -176,37 +212,51 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
// Assert scratch and extra registers are valid, and extra2 is unused.
// Assert scratch and extra registers are valid, and extra2/3 are unused.
ASSERT(!scratch.is(no_reg));
ASSERT(extra2.is(no_reg));
ASSERT(extra3.is(no_reg));
Register offset = scratch;
scratch = no_reg;
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ mov(offset, FieldOperand(name, String::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
__ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// ProbeTable expects the offset to be pointer scaled, which it is, because
// the heap object tag size is 2 and the pointer size log 2 is also 2.
ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
// Probe the primary table.
ProbeTable(isolate(), masm, flags, kPrimary, name, scratch, extra);
ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ sub(scratch, name);
__ add(scratch, Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
__ mov(offset, FieldOperand(name, String::kHashFieldOffset));
__ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(offset, flags);
__ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ sub(offset, name);
__ add(offset, Immediate(flags));
__ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
ProbeTable(isolate(), masm, flags, kSecondary, name, scratch, extra);
ProbeTable(
isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}
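With the receiver map folded into the hash and a third field added to each entry, every probe now touches three parallel pointer arrays (key, value, map), which is why the offset is tripled with lea and then indexed times_1. A C-style sketch of the hash arithmetic emitted above; the table sizes are illustrative, and on ia32 kHeapObjectTagSize equals kPointerSizeLog2, so the masked value is already pointer-scaled:
#include <stdint.h>
static const uint32_t kPrimaryTableSize = 2048;    // illustrative
static const uint32_t kSecondaryTableSize = 512;   // illustrative
static const uint32_t kHeapObjectTagSize = 2;
// Primary probe: mix the name's hash field, the receiver map word and the
// code flags, then mask to a pointer-scaled table offset.
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_word,
                       uint32_t flags) {
  uint32_t key = (name_hash_field + map_word) ^ flags;
  return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}
// Secondary probe: derived from the primary offset, the name pointer and the
// flags, masked with the smaller secondary table size.
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word,
                         uint32_t flags) {
  uint32_t key = primary_offset - name_word + flags;
  return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}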

16
deps/v8/src/ic.cc

@ -2482,9 +2482,21 @@ CompareIC::State CompareIC::TargetState(State state,
case UNINITIALIZED:
if (x->IsSmi() && y->IsSmi()) return SMIS;
if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (!Token::IsEqualityOp(op_)) return GENERIC;
if (x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
if (Token::IsOrderedRelationalCompareOp(op_)) {
// Ordered comparisons treat undefined as NaN, so the
// HEAP_NUMBER stub will do the right thing.
if ((x->IsNumber() && y->IsUndefined()) ||
(y->IsNumber() && x->IsUndefined())) {
return HEAP_NUMBERS;
}
}
if (x->IsSymbol() && y->IsSymbol()) {
// We compare symbols as strings if we need to determine
// the order in a non-equality compare.
return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
}
if (x->IsString() && y->IsString()) return STRINGS;
if (!Token::IsEqualityOp(op_)) return GENERIC;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
Handle<JSObject>::cast(y)->map() &&

226
deps/v8/src/interface.cc

@ -0,0 +1,226 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "interface.h"
namespace v8 {
namespace internal {
static bool Match(void* key1, void* key2) {
String* name1 = *static_cast<String**>(key1);
String* name2 = *static_cast<String**>(key2);
ASSERT(name1->IsSymbol());
ASSERT(name2->IsSymbol());
return name1 == name2;
}
Interface* Interface::Lookup(Handle<String> name) {
ASSERT(IsModule());
ZoneHashMap* map = Chase()->exports_;
if (map == NULL) return NULL;
ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false);
if (p == NULL) return NULL;
ASSERT(*static_cast<String**>(p->key) == *name);
ASSERT(p->value != NULL);
return static_cast<Interface*>(p->value);
}
#ifdef DEBUG
// Current nesting depth for debug output.
class Nesting {
public:
Nesting() { current_ += 2; }
~Nesting() { current_ -= 2; }
static int current() { return current_; }
private:
static int current_;
};
int Nesting::current_ = 0;
#endif
void Interface::DoAdd(
void* name, uint32_t hash, Interface* interface, bool* ok) {
MakeModule(ok);
if (!*ok) return;
#ifdef DEBUG
if (FLAG_print_interface_details) {
PrintF("%*s# Adding...\n", Nesting::current(), "");
PrintF("%*sthis = ", Nesting::current(), "");
this->Print(Nesting::current());
PrintF("%*s%s : ", Nesting::current(), "",
(*reinterpret_cast<String**>(name))->ToAsciiArray());
interface->Print(Nesting::current());
}
#endif
ZoneHashMap** map = &Chase()->exports_;
if (*map == NULL) *map = new ZoneHashMap(Match, 8);
ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen());
if (p == NULL) {
// This didn't have name but was frozen already, that's an error.
*ok = false;
} else if (p->value == NULL) {
p->value = interface;
} else {
#ifdef DEBUG
Nesting nested;
#endif
reinterpret_cast<Interface*>(p->value)->Unify(interface, ok);
}
#ifdef DEBUG
if (FLAG_print_interface_details) {
PrintF("%*sthis' = ", Nesting::current(), "");
this->Print(Nesting::current());
PrintF("%*s# Added.\n", Nesting::current(), "");
}
#endif
}
void Interface::Unify(Interface* that, bool* ok) {
if (this->forward_) return this->Chase()->Unify(that, ok);
if (that->forward_) return this->Unify(that->Chase(), ok);
ASSERT(this->forward_ == NULL);
ASSERT(that->forward_ == NULL);
*ok = true;
if (this == that) return;
if (this->IsValue()) return that->MakeValue(ok);
if (that->IsValue()) return this->MakeValue(ok);
#ifdef DEBUG
if (FLAG_print_interface_details) {
PrintF("%*s# Unifying...\n", Nesting::current(), "");
PrintF("%*sthis = ", Nesting::current(), "");
this->Print(Nesting::current());
PrintF("%*sthat = ", Nesting::current(), "");
that->Print(Nesting::current());
}
#endif
// Merge the smaller interface into the larger, for performance.
if (this->exports_ != NULL && (that->exports_ == NULL ||
this->exports_->occupancy() >= that->exports_->occupancy())) {
this->DoUnify(that, ok);
} else {
that->DoUnify(this, ok);
}
#ifdef DEBUG
if (FLAG_print_interface_details) {
PrintF("%*sthis' = ", Nesting::current(), "");
this->Print(Nesting::current());
PrintF("%*sthat' = ", Nesting::current(), "");
that->Print(Nesting::current());
PrintF("%*s# Unified.\n", Nesting::current(), "");
}
#endif
}
void Interface::DoUnify(Interface* that, bool* ok) {
ASSERT(this->forward_ == NULL);
ASSERT(that->forward_ == NULL);
ASSERT(!this->IsValue());
ASSERT(!that->IsValue());
ASSERT(*ok);
#ifdef DEBUG
Nesting nested;
#endif
// Try to merge all members from that into this.
ZoneHashMap* map = that->exports_;
if (map != NULL) {
for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), ok);
if (!*ok) return;
}
}
// If the combined interface is larger than 'that' was, then 'this' had
// members which 'that' did not. If 'that' is frozen, that is an error.
int this_size = this->exports_ == NULL ? 0 : this->exports_->occupancy();
int that_size = map == NULL ? 0 : map->occupancy();
if (that->IsFrozen() && this_size > that_size) {
*ok = false;
return;
}
// Merge interfaces.
this->flags_ |= that->flags_;
that->forward_ = this;
}
#ifdef DEBUG
void Interface::Print(int n) {
int n0 = n > 0 ? n : 0;
if (FLAG_print_interface_details) {
PrintF("%p", static_cast<void*>(this));
for (Interface* link = this->forward_; link != NULL; link = link->forward_)
PrintF("->%p", static_cast<void*>(link));
PrintF(" ");
}
if (IsUnknown()) {
PrintF("unknown\n");
} else if (IsValue()) {
PrintF("value\n");
} else if (IsModule()) {
PrintF("module %s{", IsFrozen() ? "" : "(unresolved) ");
ZoneHashMap* map = Chase()->exports_;
if (map == NULL || map->occupancy() == 0) {
PrintF("}\n");
} else if (n < 0 || n0 >= 2 * FLAG_print_interface_depth) {
// Avoid infinite recursion on cyclic types.
PrintF("...}\n");
} else {
PrintF("\n");
for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
String* name = *static_cast<String**>(p->key);
Interface* interface = static_cast<Interface*>(p->value);
PrintF("%*s%s : ", n0 + 2, "", name->ToAsciiArray());
interface->Print(n0 + 2);
}
PrintF("%*s}\n", n0, "");
}
}
}
#endif
} } // namespace v8::internal

156
deps/v8/src/interface.h

@ -0,0 +1,156 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_INTERFACE_H_
#define V8_INTERFACE_H_
#include "zone-inl.h" // For operator new.
namespace v8 {
namespace internal {
// This class implements the following abstract grammar of interfaces
// (i.e. module types):
// interface ::= UNDETERMINED | VALUE | MODULE(exports)
// exports ::= {name : interface, ...}
// A frozen module type is one that is fully determined. Unification does not
// allow adding additional exports to frozen interfaces.
// Otherwise, unifying modules merges their exports.
// Undetermined types are unification variables that can be unified freely.
class Interface : public ZoneObject {
public:
// ---------------------------------------------------------------------------
// Factory methods.
static Interface* NewValue() {
static Interface value_interface(VALUE + FROZEN); // Cached.
return &value_interface;
}
static Interface* NewUnknown() {
return new Interface(NONE);
}
static Interface* NewModule() {
return new Interface(MODULE);
}
// ---------------------------------------------------------------------------
// Mutators.
// Add a name to the list of exports. If it already exists, unify its
// interface with the given one; otherwise insert it unless this is closed (frozen).
void Add(Handle<String> name, Interface* interface, bool* ok) {
DoAdd(name.location(), name->Hash(), interface, ok);
}
// Unify with another interface. If successful, both interface objects will
// represent the same type, and changes to one are reflected in the other.
void Unify(Interface* that, bool* ok);
// Determine this interface to be a value interface.
void MakeValue(bool* ok) {
*ok = !IsModule();
if (*ok) Chase()->flags_ |= VALUE;
}
// Determine this interface to be a module interface.
void MakeModule(bool* ok) {
*ok = !IsValue();
if (*ok) Chase()->flags_ |= MODULE;
}
// Do not allow any further refinements, directly or through unification.
void Freeze(bool* ok) {
*ok = IsValue() || IsModule();
if (*ok) Chase()->flags_ |= FROZEN;
}
// ---------------------------------------------------------------------------
// Accessors.
// Look up an exported name. Returns NULL if not (yet) defined.
Interface* Lookup(Handle<String> name);
// Check whether this is still a fully undetermined type.
bool IsUnknown() { return Chase()->flags_ == NONE; }
// Check whether this is a value type.
bool IsValue() { return Chase()->flags_ & VALUE; }
// Check whether this is a module type.
bool IsModule() { return Chase()->flags_ & MODULE; }
// Check whether this is closed (i.e. fully determined).
bool IsFrozen() { return Chase()->flags_ & FROZEN; }
// ---------------------------------------------------------------------------
// Debugging.
#ifdef DEBUG
void Print(int n = 0); // n = indentation; n < 0 => don't print recursively
#endif
// ---------------------------------------------------------------------------
// Implementation.
private:
enum Flags { // All flags are monotonic
NONE = 0,
VALUE = 1, // This type describes a value
MODULE = 2, // This type describes a module
FROZEN = 4 // This type is fully determined
};
int flags_;
Interface* forward_; // Unification link
ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
explicit Interface(int flags)
: flags_(flags),
forward_(NULL),
exports_(NULL) {
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Creating %p\n", static_cast<void*>(this));
#endif
}
Interface* Chase() {
Interface* result = this;
while (result->forward_ != NULL) result = result->forward_;
if (result != this) forward_ = result; // On-the-fly path compression.
return result;
}
void DoAdd(void* name, uint32_t hash, Interface* interface, bool* ok);
void DoUnify(Interface* that, bool* ok);
};
} } // namespace v8::internal
#endif // V8_INTERFACE_H_
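// A minimal usage sketch of the API above (illustrative only; name_x stands
// in for a symbol handle that the parser would normally supply). It builds
// the interface MODULE({x : VALUE}) and shows that unifying it with a frozen
// empty module fails, since frozen interfaces cannot gain exports:
//
//   bool ok = true;
//   Interface* module = Interface::NewModule();
//   module->Add(name_x, Interface::NewValue(), &ok);  // exports: {x : VALUE}
//   ASSERT(ok);
//   Interface* closed = Interface::NewModule();
//   closed->Freeze(&ok);                              // fully determined
//   module->Unify(closed, &ok);                       // 'closed' lacks 'x'
//   ASSERT(!ok);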

8
deps/v8/src/isolate.cc

@ -1486,6 +1486,7 @@ Isolate::Isolate()
has_installed_extensions_(false),
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
embedder_data_(NULL),
context_exit_happened_(false) {
TRACE_ISOLATE(constructor);
@ -1618,6 +1619,9 @@ Isolate::~Isolate() {
delete unicode_cache_;
unicode_cache_ = NULL;
delete date_cache_;
date_cache_ = NULL;
delete regexp_stack_;
regexp_stack_ = NULL;
@ -1782,6 +1786,7 @@ bool Isolate::Init(Deserializer* des) {
stub_cache_ = new StubCache(this);
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
// Enable logging before setting up the heap
logger_->SetUp();
@ -1836,13 +1841,12 @@ bool Isolate::Init(Deserializer* des) {
#ifdef ENABLE_DEBUGGER_SUPPORT
debug_->SetUp(create_heap_objects);
#endif
stub_cache_->Initialize(create_heap_objects);
// If we are deserializing, read the state into the now-empty heap.
if (des != NULL) {
des->Deserialize();
stub_cache_->Initialize(true);
}
stub_cache_->Initialize();
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();

15
deps/v8/src/isolate.h

@ -36,6 +36,7 @@
#include "contexts.h"
#include "execution.h"
#include "frames.h"
#include "date.h"
#include "global-handles.h"
#include "handles.h"
#include "hashmap.h"
@ -1017,6 +1018,17 @@ class Isolate {
return OS::TimeCurrentMillis() - time_millis_at_init_;
}
DateCache* date_cache() {
return date_cache_;
}
void set_date_cache(DateCache* date_cache) {
if (date_cache != date_cache_) {
delete date_cache_;
}
date_cache_ = date_cache;
}
private:
Isolate();
@ -1184,6 +1196,9 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
void* embedder_data_;

12
deps/v8/src/jsregexp.cc

@ -175,7 +175,8 @@ Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
case JSRegExp::IRREGEXP: {
Handle<Object> result =
IrregexpExec(regexp, subject, index, last_match_info);
ASSERT(!result.is_null() || Isolate::Current()->has_pending_exception());
ASSERT(!result.is_null() ||
regexp->GetIsolate()->has_pending_exception());
return result;
}
default:
@ -527,6 +528,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
Handle<String> subject,
int previous_index,
Handle<JSArray> last_match_info) {
Isolate* isolate = jsregexp->GetIsolate();
ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
// Prepare space for the return values.
@ -542,11 +544,11 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
ASSERT(Isolate::Current()->has_pending_exception());
ASSERT(isolate->has_pending_exception());
return Handle<Object>::null();
}
OffsetsVector registers(required_registers);
OffsetsVector registers(required_registers, isolate);
IrregexpResult res = RegExpImpl::IrregexpExecOnce(
jsregexp, subject, previous_index, Vector<int>(registers.vector(),
@ -568,11 +570,11 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
return last_match_info;
}
if (res == RE_EXCEPTION) {
ASSERT(Isolate::Current()->has_pending_exception());
ASSERT(isolate->has_pending_exception());
return Handle<Object>::null();
}
ASSERT(res == RE_FAILURE);
return Isolate::Current()->factory()->null_value();
return isolate->factory()->null_value();
}

4
deps/v8/src/jsregexp.h

@ -1466,12 +1466,12 @@ class RegExpEngine: public AllStatic {
class OffsetsVector {
public:
explicit inline OffsetsVector(int num_registers)
inline OffsetsVector(int num_registers, Isolate* isolate)
: offsets_vector_length_(num_registers) {
if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
vector_ = NewArray<int>(offsets_vector_length_);
} else {
vector_ = Isolate::Current()->jsregexp_static_offsets_vector();
vector_ = isolate->jsregexp_static_offsets_vector();
}
}
inline ~OffsetsVector() {

16
deps/v8/src/lithium-allocator.cc

@ -1105,7 +1105,7 @@ bool LAllocator::Allocate(LChunk* chunk) {
void LAllocator::MeetRegisterConstraints() {
HPhase phase("Register constraints", chunk_);
HPhase phase("L_Register constraints", chunk_);
first_artificial_register_ = next_virtual_register_;
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int i = 0; i < blocks->length(); ++i) {
@ -1117,7 +1117,7 @@ void LAllocator::MeetRegisterConstraints() {
void LAllocator::ResolvePhis() {
HPhase phase("Resolve phis", chunk_);
HPhase phase("L_Resolve phis", chunk_);
// Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@ -1207,7 +1207,7 @@ HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
void LAllocator::ConnectRanges() {
HPhase phase("Connect ranges", this);
HPhase phase("L_Connect ranges", this);
for (int i = 0; i < live_ranges()->length(); ++i) {
LiveRange* first_range = live_ranges()->at(i);
if (first_range == NULL || first_range->parent() != NULL) continue;
@ -1247,7 +1247,7 @@ bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
void LAllocator::ResolveControlFlow() {
HPhase phase("Resolve control flow", this);
HPhase phase("L_Resolve control flow", this);
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
for (int block_id = 1; block_id < blocks->length(); ++block_id) {
HBasicBlock* block = blocks->at(block_id);
@ -1268,7 +1268,7 @@ void LAllocator::ResolveControlFlow() {
void LAllocator::BuildLiveRanges() {
HPhase phase("Build live ranges", this);
HPhase phase("L_Build live ranges", this);
InitializeLivenessAnalysis();
// Process the blocks in reverse order.
const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@ -1373,7 +1373,7 @@ bool LAllocator::SafePointsAreInOrder() const {
void LAllocator::PopulatePointerMaps() {
HPhase phase("Populate pointer maps", this);
HPhase phase("L_Populate pointer maps", this);
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
ASSERT(SafePointsAreInOrder());
@ -1492,14 +1492,14 @@ void LAllocator::ProcessOsrEntry() {
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("Allocate general registers", this);
HPhase phase("L_Allocate general registers", this);
num_registers_ = Register::kNumAllocatableRegisters;
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("Allocate double registers", this);
HPhase phase("L_Allocate double registers", this);
num_registers_ = DoubleRegister::kNumAllocatableRegisters;
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();

9
deps/v8/src/lithium.h

@ -438,14 +438,14 @@ class LPointerMap: public ZoneObject {
class LEnvironment: public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
bool is_arguments_adaptor,
FrameType frame_type,
int ast_id,
int parameter_count,
int argument_count,
int value_count,
LEnvironment* outer)
: closure_(closure),
is_arguments_adaptor_(is_arguments_adaptor),
frame_type_(frame_type),
arguments_stack_height_(argument_count),
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
@ -459,6 +459,7 @@ class LEnvironment: public ZoneObject {
outer_(outer) { }
Handle<JSFunction> closure() const { return closure_; }
FrameType frame_type() const { return frame_type_; }
int arguments_stack_height() const { return arguments_stack_height_; }
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
@ -503,11 +504,9 @@ class LEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
bool is_arguments_adaptor() const { return is_arguments_adaptor_; }
private:
Handle<JSFunction> closure_;
bool is_arguments_adaptor_;
FrameType frame_type_;
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;
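// The frame_type_ stored above replaces the old is_arguments_adaptor_ flag so
// that deoptimization environments can also describe inlined construct-stub
// frames. A hypothetical reconstruction of the enum's shape, inferred from
// the WriteTranslation switch in the mips port further down (the actual
// definition lives elsewhere in this commit):
//
//   enum FrameType {
//     JS_FUNCTION,
//     JS_CONSTRUCT,
//     ARGUMENTS_ADAPTOR
//   };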

40
deps/v8/src/macros.py

@ -164,16 +164,36 @@ const MAX_TIME_BEFORE_UTC = 8640002592000000;
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
macro DAY(time) = ($floor(time / 86400000));
macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
macro NAN_OR_SEC_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : SEC_FROM_TIME(time));
macro MS_FROM_TIME(time) = (Modulo(time, 1000));
macro NAN_OR_MS_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MS_FROM_TIME(time));
macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') ThrowDateTypeError();
macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
macro UTC_DATE_VALUE(arg) = (%_DateField(arg, 0));
macro LOCAL_YEAR(arg) = (%_DateField(arg, 1));
macro LOCAL_MONTH(arg) = (%_DateField(arg, 2));
macro LOCAL_DAY(arg) = (%_DateField(arg, 3));
macro LOCAL_WEEKDAY(arg) = (%_DateField(arg, 4));
macro LOCAL_HOUR(arg) = (%_DateField(arg, 5));
macro LOCAL_MIN(arg) = (%_DateField(arg, 6));
macro LOCAL_SEC(arg) = (%_DateField(arg, 7));
macro LOCAL_MS(arg) = (%_DateField(arg, 8));
macro LOCAL_DAYS(arg) = (%_DateField(arg, 9));
macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
macro UTC_YEAR(arg) = (%_DateField(arg, 11));
macro UTC_MONTH(arg) = (%_DateField(arg, 12));
macro UTC_DAY(arg) = (%_DateField(arg, 13));
macro UTC_WEEKDAY(arg) = (%_DateField(arg, 14));
macro UTC_HOUR(arg) = (%_DateField(arg, 15));
macro UTC_MIN(arg) = (%_DateField(arg, 16));
macro UTC_SEC(arg) = (%_DateField(arg, 17));
macro UTC_MS(arg) = (%_DateField(arg, 18));
macro UTC_DAYS(arg) = (%_DateField(arg, 19));
macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
macro TIMEZONE_OFFSET(arg) = (%_DateField(arg, 21));
macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
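# Illustration (assuming the field numbering above): a getter such as
# Date.prototype.getUTCHours() can now be written as UTC_HOUR(this), i.e. a
# single %_DateField(this, 15) read of the cached field, instead of
# recomputing the hour from the time value the way HOUR_FROM_TIME did.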
# Last input and last subject of regexp matches.
macro LAST_SUBJECT(array) = ((array)[1]);

4
deps/v8/src/mark-compact-inl.h

@ -45,8 +45,10 @@ MarkBit Marking::MarkBitFrom(Address addr) {
void MarkCompactCollector::SetFlags(int flags) {
sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
abort_incremental_marking_ =
((flags & Heap::kAbortIncrementalMarkingMask) != 0);
}

4
deps/v8/src/mark-compact.cc

@ -686,8 +686,8 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
#endif
// Clear marking bits for precise sweeping to collect all garbage.
if (was_marked_incrementally_ && PreciseSweepingRequired()) {
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && abort_incremental_marking_) {
heap()->incremental_marking()->Abort();
ClearMarkbits();
AbortCompaction();

9
deps/v8/src/mark-compact.h

@ -420,14 +420,9 @@ class MarkCompactCollector {
// Pointer to member function, used in IterateLiveObjects.
typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
// Set the global force_compaction flag, it must be called before Prepare
// to take effect.
// Set the global flags; this must be called before Prepare to take effect.
inline void SetFlags(int flags);
inline bool PreciseSweepingRequired() {
return sweep_precisely_;
}
static void Initialize();
void CollectEvacuationCandidates(PagedSpace* space);
@ -579,6 +574,8 @@ class MarkCompactCollector {
bool reduce_memory_footprint_;
bool abort_incremental_marking_;
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;

30
deps/v8/src/messages.js

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -210,7 +210,7 @@ function FormatMessage(message) {
"no_input_to_regexp", ["No input to ", "%0"],
"invalid_json", ["String '", "%0", "' is not valid JSON"],
"circular_structure", ["Converting circular structure to JSON"],
"obj_ctor_property_non_object", ["Object.", "%0", " called on non-object"],
"called_on_non_object", ["%0", " called on non-object"],
"called_on_null_or_undefined", ["%0", " called on null or undefined"],
"array_indexof_not_defined", ["Array.getIndexOf: Argument undefined"],
"object_not_extensible", ["Can't add property ", "%0", ", object is not extensible"],
@ -246,6 +246,8 @@ function FormatMessage(message) {
"cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
"redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
"harmony_const_assign", ["Assignment to constant variable."],
"invalid_module_path", ["Module does not export '", "%0", "', or export is not itself a module"],
"module_type_error", ["Module '", "%0", "' used improperly"],
];
var messages = { __proto__ : null };
for (var i = 0; i < messagesDictionary.length; i += 2) {
@ -533,6 +535,13 @@ function ScriptNameOrSourceURL() {
if (this.name) {
return this.name;
}
// The result is cached because on long scripts it takes noticeable time to
// search for the sourceURL.
if (this.hasCachedNameOrSourceURL)
return this.cachedNameOrSourceURL;
this.hasCachedNameOrSourceURL = true;
// TODO(608): the spaces in a regexp below had to be escaped as \040
// because this file is being processed by js2c whose handling of spaces
// in regexps is broken. Also, ['"] are excluded from allowed URLs to
@ -541,6 +550,7 @@ function ScriptNameOrSourceURL() {
// the scanner/parser.
var source = ToString(this.source);
var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
this.cachedNameOrSourceURL = this.name;
if (sourceUrlPos > 4) {
var sourceUrlPattern =
/\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
@ -551,15 +561,17 @@ function ScriptNameOrSourceURL() {
var match =
%_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
if (match) {
return SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
this.cachedNameOrSourceURL =
SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
}
}
return this.name;
return this.cachedNameOrSourceURL;
}
SetUpLockedPrototype(Script,
$Array("source", "name", "line_ends", "line_offset", "column_offset"),
$Array("source", "name", "line_ends", "line_offset", "column_offset",
"cachedNameOrSourceURL", "hasCachedNameOrSourceURL" ),
$Array(
"lineFromPosition", ScriptLineFromPosition,
"locationFromPosition", ScriptLocationFromPosition,
@ -759,8 +771,7 @@ function DefineOneShotAccessor(obj, name, fun) {
hasBeenSet = true;
value = v;
};
%DefineOrRedefineAccessorProperty(obj, name, GETTER, getter, DONT_ENUM);
%DefineOrRedefineAccessorProperty(obj, name, SETTER, setter, DONT_ENUM);
%DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
}
function CallSite(receiver, fun, pos) {
@ -1190,9 +1201,8 @@ function ErrorToStringDetectCycle(error) {
}
function ErrorToString() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Error.prototype.toString"]);
if (!IS_SPEC_OBJECT(this)) {
throw MakeTypeError("called_on_non_object", ["Error.prototype.toString"]);
}
try {

19
deps/v8/src/mips/builtins-mips.cc

@ -1008,6 +1008,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
NullCallWrapper(), CALL_AS_METHOD);
}
// Store offset of return address for deoptimizer.
if (!is_api_function && !count_constructions) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
}
// Restore context from the frame.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -1730,8 +1735,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
// TODO(MIPS): Optimize these loops.
// Calculate copy start address into a0 and copy end address is fp.
// a0: actual number of arguments as a smi
// a1: function
@ -1753,9 +1756,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
__ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver.
__ push(t0);
__ Subu(sp, sp, kPointerSize);
__ Subu(a0, a0, kPointerSize);
__ Branch(&copy, ne, a0, Operand(t3));
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
__ sw(t0, MemOperand(sp)); // In the delay slot.
// Fill the remaining expected arguments with undefined.
// a1: function
@ -1768,8 +1772,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label fill;
__ bind(&fill);
__ push(t0);
__ Branch(&fill, ne, sp, Operand(a2));
__ Subu(sp, sp, kPointerSize);
__ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
__ sw(t0, MemOperand(sp));
}
// Call the entry point.
@ -1777,7 +1782,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Call(a3);
// Store offset of return address for deoptimizer.
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Ret();

53
deps/v8/src/mips/code-stubs-mips.cc

@ -5363,17 +5363,19 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// A monomorphic miss (i.e. here the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&done, eq, a3, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
__ Branch(&done);
__ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
// An uninitialized cache is patched with the function.
// Store a1 in the delay slot. This may or may not get overwritten depending
// on the result of the comparison.
__ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
// No need for a write barrier here - cells are rescanned.
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
__ bind(&done);
}
@ -6149,8 +6151,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
__ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
__ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
__ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
__ sra(t0, t0, 1); // Add offset to index.
__ Addu(a3, a3, t0);
// Update instance type.
@ -6188,8 +6190,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
__ bind(&set_slice_header);
__ sll(a3, a3, 1);
__ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
__ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
__ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
__ jmp(&return_v0);
__ bind(&copy_routine);
@ -6783,15 +6785,15 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &generic_stub);
__ GetObjectType(a0, a2, a2);
__ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ GetObjectType(a1, a2, a2);
__ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or FPU is unsupported.
@ -6823,14 +6825,29 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ bind(&fpu_lt);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(LESS)); // In delay slot.
__ bind(&unordered);
}
__ bind(&unordered);
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
__ bind(&generic_stub);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
}
__ bind(&maybe_undefined2);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&unordered, eq, a1, Operand(at));
}
__ bind(&miss);
GenerateMiss(masm);
}
@ -7070,7 +7087,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the null value).
// (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
@ -7099,9 +7116,15 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(done, eq, entity_name, Operand(tmp));
if (i != kInlinedProbes - 1) {
// Load the hole ready for use below:
__ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
// Stop if found the property.
__ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
Label the_hole;
__ Branch(&the_hole, eq, entity_name, Operand(tmp));
// Check if the entry name is not a symbol.
__ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
@ -7109,6 +7132,8 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ And(scratch0, entity_name, Operand(kIsSymbolMask));
__ Branch(miss, eq, scratch0, Operand(zero_reg));
__ bind(&the_hole);
// Restore the properties.
__ lw(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOffset));

13
deps/v8/src/mips/codegen-mips.cc

@ -37,6 +37,19 @@ namespace internal {
#define __ ACCESS_MASM(masm)
TranscendentalFunction CreateTranscendentalFunction(
TranscendentalCache::Type type) {
switch (type) {
case TranscendentalCache::SIN: return &sin;
case TranscendentalCache::COS: return &cos;
case TranscendentalCache::TAN: return &tan;
case TranscendentalCache::LOG: return &log;
default: UNIMPLEMENTED();
}
return NULL;
}
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

157
deps/v8/src/mips/deoptimizer-mips.cc

@ -355,7 +355,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
@ -377,16 +376,13 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
@ -396,7 +392,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
@ -408,7 +403,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// A marker value is used in place of the context.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
@ -419,7 +413,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@ -429,7 +422,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
// Number of incoming arguments.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
@ -449,6 +441,119 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = 7 * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// A construct stub frame can be neither topmost nor bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
// A marker value is used in place of the function.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
// Constructor function being invoked by the stub.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
top_address + output_offset, output_offset, value);
}
// The newly allocated object was passed as receiver in the artificial
// constructor stub environment created by HEnvironment::CopyForInlining().
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
uint32_t pc = reinterpret_cast<uint32_t>(
construct_stub->instruction_start() +
isolate_->heap()->construct_stub_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
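// For orientation, the fixed part of the construct-stub frame built above is
// 7 * kPointerSize and holds, from the highest slot written to the lowest:
// caller's pc, caller's fp, context, the CONSTRUCT frame marker, argc, the
// constructor function and the allocated receiver; the height_in_bytes of
// translated parameters sits above it, and the frame's pc is pointed at the
// construct stub's recorded deopt offset.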
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@ -561,9 +666,8 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
}
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(cp.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
@ -837,7 +941,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Maximum size of a table entry generated below.
const int Deoptimizer::table_entry_size_ = 12 * Assembler::kInstrSize;
const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
@ -851,29 +955,20 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&start);
if (type() != EAGER) {
// Emulate ia32 like call by pushing return address to stack.
__ addiu(sp, sp, -3 * kPointerSize);
__ sw(ra, MemOperand(sp, 2 * kPointerSize));
} else {
__ addiu(sp, sp, -2 * kPointerSize);
__ sw(ra, MemOperand(sp, 1 * kPointerSize));
} else {
__ addiu(sp, sp, -1 * kPointerSize);
}
// Using ori makes sure only one instruction is generated. This will work
// as long as the number of deopt entries is below 2^16.
__ ori(at, zero_reg, i);
__ sw(at, MemOperand(sp, kPointerSize));
__ sw(ra, MemOperand(sp, 0));
// This branch instruction only jumps over one instruction, and that is
// executed in the delay slot. The result is that execution is linear but
// the ra register is updated.
__ bal(1);
// Jump over the remaining deopt entries (including this one).
// Only include the remaining part of the current entry in the calculation.
// This code is always reached by calling Jump, which puts the target (label
// start) into t9.
const int remaining_entries = (count() - i) * table_entry_size_;
const int cur_size = masm()->SizeOfCodeGeneratedSince(&start);
// ra points to the instruction after the delay slot. Adjust by 4.
__ Addu(at, ra, remaining_entries - cur_size - Assembler::kInstrSize);
__ lw(ra, MemOperand(sp, 0));
__ jr(at); // Expose delay slot.
__ addiu(sp, sp, kPointerSize); // In delay slot.
__ Addu(t9, t9, remaining_entries);
// 'at' was clobbered so we can only load the current entry value here.
__ li(at, i);
__ jr(t9); // Expose delay slot.
__ sw(at, MemOperand(sp, 0 * kPointerSize)); // In the delay slot.
// Pad the rest of the code.
while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {

78
deps/v8/src/mips/full-codegen-mips.cc

@ -120,7 +120,7 @@ class JumpPatchSite BASE_EMBEDDED {
int FullCodeGenerator::self_optimization_header_size() {
return 0; // TODO(jkummerow): determine correct value.
return 11 * Instruction::kInstrSize;
}
@ -164,7 +164,7 @@ void FullCodeGenerator::Generate() {
Handle<Code> compile_stub(
isolate()->builtins()->builtin(Builtins::kLazyRecompile));
__ Jump(compile_stub, RelocInfo::CODE_TARGET, eq, a3, Operand(zero_reg));
ASSERT(masm_->pc_offset() == self_optimization_header_size());
ASSERT_EQ(masm_->pc_offset(), self_optimization_header_size());
}
}
@ -952,7 +952,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Register null_value = t1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Branch(&exit, eq, a0, Operand(null_value));
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
__ mov(a0, v0);
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
@ -975,44 +976,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = t2;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = t3;
__ LoadRoot(empty_descriptor_array_value,
Heap::kEmptyDescriptorArrayRootIndex);
__ mov(a1, a0);
__ bind(&next);
// Check that there are no elements. Register a1 contains the
// current JS object we've reached through the prototype chain.
__ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
__ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in a2 for the subsequent
// prototype load.
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
__ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
__ JumpIfSmi(a3, &call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (a3). This is the case if the next enumeration
// index field does not contain a smi.
__ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
__ JumpIfSmi(a3, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
__ Branch(&check_prototype, eq, a1, Operand(a0));
__ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
// Load the prototype from the map and loop if non-null.
__ bind(&check_prototype);
__ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
__ Branch(&next, ne, a1, Operand(null_value));
__ CheckEnumCache(null_value, &call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@ -1051,6 +1015,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register v0. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
RecordTypeFeedbackCell(stmt->PrepareId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
__ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
__ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@ -1064,6 +1038,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(a1, a0); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
// Load the current count to a0, load the length to a1.
__ lw(a0, MemOperand(sp, 0 * kPointerSize));
@ -1108,7 +1083,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(result_register(), a3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->AssignmentId());
EmitAssignment(stmt->each());
}
// Generate code for the body of the loop.
@ -1129,6 +1104,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Drop(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(&exit);
decrement_loop_depth();
}
@ -1534,11 +1510,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ lw(a0, MemOperand(sp));
__ push(a0);
VisitForStackValue(key);
__ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
Smi::FromInt(1) :
Smi::FromInt(0)));
__ push(a1);
VisitForStackValue(value);
if (property->kind() == ObjectLiteral::Property::GETTER) {
VisitForStackValue(value);
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
} else {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
VisitForStackValue(value);
}
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
@ -1899,7 +1879,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
}
void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
void FullCodeGenerator::EmitAssignment(Expression* expr) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@ -1951,7 +1931,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
break;
}
}
PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(v0);
}
@ -2444,6 +2423,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(flags);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
}

13
deps/v8/src/mips/ic-mips.cc

@ -401,7 +401,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
NORMAL,
argc);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1);
masm, flags, a1, a2, a3, t0, t1, t2);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@ -437,7 +437,7 @@ void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
// Probe the stub cache for the value object.
__ bind(&probe);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1);
masm, flags, a1, a2, a3, t0, t1, t2);
__ bind(&miss);
}
@ -702,7 +702,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1);
masm, flags, a0, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -1513,7 +1513,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags =
Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a1, a2, a3, t0, t1);
masm, flags, a1, a2, a3, t0, t1, t2);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@ -1571,7 +1571,10 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// Check that the array has fast properties, otherwise the length
// property might have been redefined.
// TODO(mstarzinger): Port this check to MIPS.
__ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
__ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&miss, eq, scratch, Operand(at));
// Check that value is a smi.
__ JumpIfNotSmi(value, &miss);

236
deps/v8/src/mips/lithium-codegen-mips.cc

@ -62,7 +62,7 @@ class SafepointGenerator : public CallWrapper {
#define __ masm()->
bool LCodeGen::GenerateCode() {
HPhase phase("Code generation", chunk());
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
CpuFeatures::Scope scope(FPU);
@ -447,10 +447,18 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
if (environment->is_arguments_adaptor()) {
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
} else {
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
switch (environment->frame_type()) {
case JS_FUNCTION:
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
break;
case JS_CONSTRUCT:
translation->BeginConstructStubFrame(closure_id, translation_size);
break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
default:
UNREACHABLE();
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
@ -580,7 +588,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (!e->is_arguments_adaptor()) {
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
@ -3125,14 +3133,63 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LRandom* instr_;
};
DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(f0));
ASSERT(ToRegister(instr->InputAt(0)).is(a0));
__ PrepareCallCFunction(1, a1);
static const int kSeedSize = sizeof(uint32_t);
STATIC_ASSERT(kPointerSize == kSeedSize);
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
static const int kRandomSeedOffset =
FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
__ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
// a2: FixedArray of the global context's random seeds
// Load state[0].
__ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
__ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
// Load state[1].
__ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
// a1: state[0].
// a0: state[1].
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
__ And(a3, a1, Operand(0xFFFF));
__ li(t0, Operand(18273));
__ mul(a3, a3, t0);
__ srl(a1, a1, 16);
__ Addu(a1, a3, a1);
// Save state[0].
__ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
__ And(a3, a0, Operand(0xFFFF));
__ li(t0, Operand(36969));
__ mul(a3, a3, t0);
__ srl(a0, a0, 16),
__ Addu(a0, a3, a0);
// Save state[1].
__ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
// Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
__ And(a0, a0, Operand(0x3FFFF));
__ sll(a1, a1, 14);
__ Addu(v0, a0, a1);
__ bind(deferred->exit());
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
__ li(a2, Operand(0x41300000));
@ -3144,6 +3201,12 @@ void LCodeGen::DoRandom(LRandom* instr) {
__ sub_d(f0, f12, f14);
}
void LCodeGen::DoDeferredRandom(LRandom* instr) {
__ PrepareCallCFunction(1, scratch0());
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// Return value is in v0.
}
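// Illustrative sketch, not part of this file: the fast path in DoRandom above
// is a pair of Marsaglia-style multiply-with-carry steps over two 32-bit
// seeds kept in the global context. The same arithmetic in plain C++
// (uint32_t from <stdint.h>):
static inline uint32_t NextRandomBits(uint32_t* state0, uint32_t* state1) {
  *state0 = 18273u * (*state0 & 0xFFFFu) + (*state0 >> 16);
  *state1 = 36969u * (*state1 & 0xFFFFu) + (*state1 >> 16);
  // The stub pairs these bits with high word 0x41300000 (1.0 x 2^20) to form
  // a double in [2^20, 2^20 + 1) and subtracts 2^20, yielding [0.0, 1.0).
  return (*state0 << 14) + (*state1 & 0x3FFFF);
}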
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
@ -4222,6 +4285,80 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
class DeferredAllocateObject: public LDeferredCode {
public:
DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
virtual LInstruction* instr() { return instr_; }
private:
LAllocateObject* instr_;
};
DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
Register result = ToRegister(instr->result());
Register scratch = ToRegister(instr->TempAt(0));
Register scratch2 = ToRegister(instr->TempAt(1));
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
Handle<Map> initial_map(constructor->initial_map());
int instance_size = initial_map->instance_size();
ASSERT(initial_map->pre_allocated_property_fields() +
initial_map->unused_property_fields() -
initial_map->inobject_properties() == 0);
// Allocate memory for the object. The initial map might change when
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
__ AllocateInNewSpace(instance_size,
result,
scratch,
scratch2,
deferred->entry(),
TAG_OBJECT);
// Load the initial map.
Register map = scratch;
__ LoadHeapObject(map, constructor);
__ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
// Initialize map and fields of the newly allocated object.
ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
__ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
__ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
__ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
__ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
if (initial_map->inobject_properties() != 0) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < initial_map->inobject_properties(); i++) {
int property_offset = JSObject::kHeaderSize + i * kPointerSize;
__ sw(scratch, FieldMemOperand(result, property_offset));
}
}
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
Register result = ToRegister(instr->result());
Handle<JSFunction> constructor = instr->hydrogen()->constructor();
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ mov(result, zero_reg);
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ LoadHeapObject(a0, constructor);
__ push(a0);
CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
__ StoreToSafepointRegisterSlot(v0, result);
}
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Heap* heap = isolate()->heap();
ElementsKind boilerplate_elements_kind =
@ -4790,6 +4927,89 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
}
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(eq, instr->environment(), object, Operand(at));
Register null_value = t1;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
__ And(at, object, kSmiTagMask);
DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ GetObjectType(object, a1, a1);
DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
Label use_cache, call_runtime;
ASSERT(object.is(a0));
__ CheckEnumCache(null_value, &call_runtime);
__ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
__ Branch(&use_cache);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
ASSERT(result.is(v0));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
__ bind(&use_cache);
}
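// Net effect of the checks above: for-in over undefined, null, a smi or a
// proxy deoptimizes back to full code; an object whose enum cache checks out
// hands its map to ForInCacheArray below; anything else calls
// Runtime::kGetPropertyNamesFast and deoptimizes unless the runtime returns
// a map (i.e. the result's map is the meta map) usable by the cache path.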
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ lw(result,
FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
}
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
__ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Label out_of_object, done;
__ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
__ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
__ Addu(scratch, object, scratch);
__ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ Branch(&done);
__ bind(&out_of_object);
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
// Index is equal to negated out of object property index plus 1.
__ Subu(scratch, result, scratch);
__ lw(result, FieldMemOperand(scratch,
FixedArray::kHeaderSize - kPointerSize));
__ bind(&done);
}
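The index encoding here is worth spelling out: a non-negative (smi) index selects in-object field number index, while a negative index selects slot -index - 1 of the out-of-object properties array, which is what the "negated out of object property index plus 1" comment describes. A small illustrative model, with plain vectors in place of tagged heap memory:

#include <cstdint>
#include <vector>

struct FakeObject {
  std::vector<uint32_t> inobject;    // fields stored inside the object body
  std::vector<uint32_t> properties;  // the out-of-object FixedArray
};

uint32_t LoadFieldByIndex(const FakeObject& obj, int index) {
  if (index >= 0) {
    // JSObject::kHeaderSize + index * kPointerSize in the generated code.
    return obj.inobject[index];
  }
  // Negative indices: slot = -index - 1, hence the load from
  // FixedArray::kHeaderSize - kPointerSize after adding |index| words.
  return obj.properties[-index - 1];
}
// Example: index -1 reads properties[0], index -3 reads properties[2].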
#undef __
} } // namespace v8::internal

2
deps/v8/src/mips/lithium-codegen-mips.h

@ -110,8 +110,10 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);

461
deps/v8/src/mips/lithium-mips.cc

File diff suppressed because it is too large

78
deps/v8/src/mips/lithium-mips.h

@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@ -172,7 +173,11 @@ class LCodeGen;
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
V(ValueOf) \
V(ForInPrepareMap) \
V(ForInCacheArray) \
V(CheckMapValue) \
V(LoadFieldByIndex)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
@ -1917,6 +1922,18 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
public:
LAllocateObject(LOperand* temp1, LOperand* temp2) {
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@ -2064,6 +2081,62 @@ class LIn: public LTemplateInstruction<1, 2, 0> {
};
class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInPrepareMap(LOperand* object) {
inputs_[0] = object;
}
LOperand* object() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
public:
explicit LForInCacheArray(LOperand* map) {
inputs_[0] = map;
}
LOperand* map() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
int idx() {
return HForInCacheArray::cast(this->hydrogen_value())->idx();
}
};
class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
public:
LCheckMapValue(LOperand* value, LOperand* map) {
inputs_[0] = value;
inputs_[1] = map;
}
LOperand* value() { return inputs_[0]; }
LOperand* map() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
};
class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
public:
LLoadFieldByIndex(LOperand* object, LOperand* index) {
inputs_[0] = object;
inputs_[1] = index;
}
LOperand* object() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
};
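For readers new to the lithium classes: the three LTemplateInstruction arguments give the fixed operand counts in the order results, inputs, temps, so LAllocateObject<1, 0, 2> has one result and two temp registers while LCheckMapValue<0, 2, 0> has no result and two inputs. A toy stand-in (not V8's actual template, which uses its own embedded-vector type) showing the pattern:

template <int R, int I, int T>
struct ToyLTemplateInstruction {
  // Zero-length arrays are padded to one element to keep the sketch valid C++;
  // V8's real class uses an embedded-vector type that handles size 0.
  void* results_[R == 0 ? 1 : R];
  void* inputs_[I == 0 ? 1 : I];
  void* temps_[T == 0 ? 1 : T];
};

// <1, 0, 2>: one result, no inputs, two temporaries (compare LAllocateObject).
struct ToyAllocateObject : ToyLTemplateInstruction<1, 0, 2> {
  ToyAllocateObject(void* temp1, void* temp2) { temps_[0] = temp1; temps_[1] = temp2; }
};

// <0, 2, 0>: no result, two inputs (compare LCheckMapValue).
struct ToyCheckMapValue : ToyLTemplateInstruction<0, 2, 0> {
  ToyCheckMapValue(void* value, void* map) { inputs_[0] = value; inputs_[1] = map; }
};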
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@ -2131,6 +2204,7 @@ class LChunkBuilder BASE_EMBEDDED {
: chunk_(NULL),
info_(info),
graph_(graph),
zone_(graph->isolate()->zone()),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
@ -2160,6 +2234,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Zone* zone() const { return zone_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
@ -2265,6 +2340,7 @@ class LChunkBuilder BASE_EMBEDDED {
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Zone* zone_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;

42
deps/v8/src/mips/macro-assembler-mips.cc

@ -5093,6 +5093,48 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
}
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Label next;
// Preload a couple of values used in the loop.
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = t3;
LoadRoot(empty_descriptor_array_value,
Heap::kEmptyDescriptorArrayRootIndex);
mov(a1, a0);
bind(&next);
// Check that there are no elements. Register a1 contains the
// current JS object we've reached through the prototype chain.
lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in a2 for the subsequent
// prototype load.
lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
JumpIfSmi(a3, call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (a3). This is the case if the next enumeration
// index field does not contain a smi.
lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
JumpIfSmi(a3, call_runtime);
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
Branch(&check_prototype, eq, a1, Operand(a0));
lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
// Load the prototype from the map and loop if non-null.
bind(&check_prototype);
lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
Branch(&next, ne, a1, Operand(null_value));
}
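The loop above starts at the receiver (a0) and walks prototypes until it reaches null, bailing out to the runtime if any object in the chain has elements, lacks real instance descriptors, lacks an enum cache, or (for any object other than the receiver itself) has a non-empty cache bridge. The same control flow in plain C++, with booleans standing in for the individual loads and root comparisons:

struct ProtoLink {
  bool has_elements;              // elements != empty_fixed_array
  bool has_instance_descriptors;  // descriptors slot is not bit field 3 (a smi)
  bool has_enum_cache;            // enumeration-index slot is not a smi
  bool enum_cache_empty;          // bridge cache == empty_fixed_array
  ProtoLink* prototype;           // nullptr plays the role of null_value
};

// Returns true when every object on the chain passes; false corresponds to
// branching to the call_runtime label in the macro assembler.
bool CheckEnumCache(ProtoLink* receiver) {
  for (ProtoLink* current = receiver; current != nullptr;
       current = current->prototype) {
    if (current->has_elements)              return false;
    if (!current->has_instance_descriptors) return false;
    if (!current->has_enum_cache)           return false;
    // Only the receiver itself may already have a populated cache.
    if (current != receiver && !current->enum_cache_empty) return false;
  }
  return true;
}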
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
ASSERT(!output_reg.is(input_reg));
Label done;

4
deps/v8/src/mips/macro-assembler-mips.h

@ -1353,6 +1353,10 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,

2
deps/v8/src/mips/regexp-macro-assembler-mips.cc

@ -1056,7 +1056,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
ASSERT(*return_address <=
re_code->instruction_start() + re_code->instruction_size());
MaybeObject* result = Execution::HandleStackGuardInterrupt();
MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
if (*code_handle != re_code) { // Return address no longer valid.
int delta = code_handle->address() - re_code->address();

131
deps/v8/src/mips/stub-cache-mips.cc

@ -43,51 +43,74 @@ static void ProbeTable(Isolate* isolate,
MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register receiver,
Register name,
// Number of the cache entry, not scaled.
Register offset,
Register scratch,
Register scratch2) {
Register scratch2,
Register offset_scratch) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
// Check the relative positions of the address fields.
ASSERT(value_off_addr > key_off_addr);
ASSERT((value_off_addr - key_off_addr) % 4 == 0);
ASSERT((value_off_addr - key_off_addr) < (256 * 4));
ASSERT(map_off_addr > key_off_addr);
ASSERT((map_off_addr - key_off_addr) % 4 == 0);
ASSERT((map_off_addr - key_off_addr) < (256 * 4));
Label miss;
Register offsets_base_addr = scratch;
Register base_addr = scratch;
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ sll(offset_scratch, offset, 1);
__ Addu(offset_scratch, offset_scratch, offset);
// Calculate the base address of the entry.
__ li(base_addr, Operand(key_offset));
__ sll(at, offset_scratch, kPointerSizeLog2);
__ Addu(base_addr, base_addr, at);
// Check that the key in the entry matches the name.
__ li(offsets_base_addr, Operand(key_offset));
__ sll(scratch2, offset, 1);
__ addu(scratch2, offsets_base_addr, scratch2);
__ lw(scratch2, MemOperand(scratch2));
__ Branch(&miss, ne, name, Operand(scratch2));
__ lw(at, MemOperand(base_addr, 0));
__ Branch(&miss, ne, name, Operand(at));
// Check the map matches.
__ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
__ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Branch(&miss, ne, at, Operand(scratch2));
// Get the code entry from the cache.
__ Addu(offsets_base_addr, offsets_base_addr,
Operand(value_off_addr - key_off_addr));
__ sll(scratch2, offset, 1);
__ addu(scratch2, offsets_base_addr, scratch2);
__ lw(scratch2, MemOperand(scratch2));
Register code = scratch2;
scratch2 = no_reg;
__ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
// Check that the flags match what we're looking for.
__ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
__ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
__ Branch(&miss, ne, scratch2, Operand(flags));
Register flags_reg = base_addr;
base_addr = no_reg;
__ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
__ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
__ Branch(&miss, ne, flags_reg, Operand(flags));
// Re-load code entry from cache.
__ sll(offset, offset, 1);
__ addu(offset, offset, offsets_base_addr);
__ lw(offset, MemOperand(offset));
#ifdef DEBUG
if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
__ jmp(&miss);
} else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
__ jmp(&miss);
}
#endif
// Jump to the first instruction in the code stub.
__ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(offset);
__ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
// Miss: fall through.
__ bind(&miss);
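The switch from an 8-byte to a 12-byte Entry is what forces the multiply-by-3: each slot now holds a name, a code pointer and a map, and the unscaled entry number is tripled with a shift-and-add before being shifted into a byte offset from key_offset. A small self-contained check of that arithmetic, assuming the 32-bit layout the ASSERTs above describe:

#include <cstdint>
#include <cstdio>

struct FakeEntry {      // one stub-cache slot on 32-bit MIPS: three words
  uint32_t key;         // name
  uint32_t value;       // code
  uint32_t map;         // receiver map
};
static_assert(sizeof(FakeEntry) == 12, "three 4-byte fields per entry");

// "sll t, offset, 1; Addu t, t, offset" computes offset * 3, and the later
// "sll at, t, kPointerSizeLog2" turns that into a byte offset.
uint32_t EntryByteOffset(uint32_t entry_number) {
  uint32_t times3 = (entry_number << 1) + entry_number;
  return times3 << 2;               // * kPointerSize (4 on 32-bit)
}

int main() {
  std::printf("entry 5 -> byte offset %u\n", EntryByteOffset(5));   // 60 == 5 * 12
  return 0;
}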
@ -157,13 +180,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Register name,
Register scratch,
Register extra,
Register extra2) {
Register extra2,
Register extra3) {
Isolate* isolate = masm->isolate();
Label miss;
// Make sure that code is valid. The shifting code relies on the
// entry size being 8.
ASSERT(sizeof(Entry) == 8);
// Make sure that code is valid. The multiplying code relies on the
// entry size being 12.
ASSERT(sizeof(Entry) == 12);
// Make sure the flags do not name a specific type.
ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@ -179,39 +203,66 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!extra2.is(scratch));
ASSERT(!extra2.is(extra));
// Check scratch, extra and extra2 registers are valid.
// Check register validity.
ASSERT(!scratch.is(no_reg));
ASSERT(!extra.is(no_reg));
ASSERT(!extra2.is(no_reg));
ASSERT(!extra3.is(no_reg));
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
extra2, extra3);
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver, &miss, t0);
__ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, Operand(t8));
__ Xor(scratch, scratch, Operand(flags));
__ And(scratch,
scratch,
Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Addu(scratch, scratch, at);
uint32_t mask = kPrimaryTableSize - 1;
// We shift out the last two bits because they are not part of the hash and
// they are always 01 for maps.
__ srl(scratch, scratch, kHeapObjectTagSize);
__ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
__ And(scratch, scratch, Operand(mask));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
ProbeTable(isolate,
masm,
flags,
kPrimary,
receiver,
name,
scratch,
extra,
extra2,
extra3);
// Primary miss: Compute hash for secondary probe.
__ Subu(scratch, scratch, Operand(name));
__ Addu(scratch, scratch, Operand(flags));
__ And(scratch,
scratch,
Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
__ srl(at, name, kHeapObjectTagSize);
__ Subu(scratch, scratch, at);
uint32_t mask2 = kSecondaryTableSize - 1;
__ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
__ And(scratch, scratch, Operand(mask2));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
ProbeTable(isolate,
masm,
flags,
kSecondary,
receiver,
name,
scratch,
extra,
extra2,
extra3);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
__ bind(&miss);
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
extra2, extra3);
}
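Reading off the new instruction sequences, the primary probe hashes the name's hash field plus the receiver's map word, drops the heap-object tag bits, and xors in the shifted flags before masking to the primary table size; the secondary probe subtracts the shifted name word and adds the shifted flags. A hedged reconstruction in C++; the tag width and table sizes below are assumptions for illustration, not values taken from this diff:

#include <cstdint>

constexpr uint32_t kHeapObjectTagSize  = 2;     // maps always end in binary 01
constexpr uint32_t kPrimaryTableSize   = 2048;  // assumed size, power of two
constexpr uint32_t kSecondaryTableSize = 512;   // assumed size, power of two

uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_word,
                       uint32_t flags) {
  uint32_t mask = kPrimaryTableSize - 1;
  uint32_t h = (name_hash_field + map_word) >> kHeapObjectTagSize;
  return (h ^ (flags >> kHeapObjectTagSize)) & mask;
}

uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word, uint32_t flags) {
  uint32_t mask = kSecondaryTableSize - 1;
  uint32_t h = primary - (name_word >> kHeapObjectTagSize);
  return (h + ((flags >> kHeapObjectTagSize) & mask)) & mask;
}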

50
deps/v8/src/objects-debug.cc

@ -138,6 +138,9 @@ void HeapObject::HeapObjectVerify() {
case JS_VALUE_TYPE:
JSValue::cast(this)->JSValueVerify();
break;
case JS_DATE_TYPE:
JSDate::cast(this)->JSDateVerify();
break;
case JS_FUNCTION_TYPE:
JSFunction::cast(this)->JSFunctionVerify();
break;
@ -371,6 +374,53 @@ void JSValue::JSValueVerify() {
}
void JSDate::JSDateVerify() {
if (value()->IsHeapObject()) {
VerifyHeapPointer(value());
}
CHECK(value()->IsUndefined() || value()->IsSmi() || value()->IsHeapNumber());
CHECK(year()->IsUndefined() || year()->IsSmi() || year()->IsNaN());
CHECK(month()->IsUndefined() || month()->IsSmi() || month()->IsNaN());
CHECK(day()->IsUndefined() || day()->IsSmi() || day()->IsNaN());
CHECK(weekday()->IsUndefined() || weekday()->IsSmi() || weekday()->IsNaN());
CHECK(hour()->IsUndefined() || hour()->IsSmi() || hour()->IsNaN());
CHECK(min()->IsUndefined() || min()->IsSmi() || min()->IsNaN());
CHECK(sec()->IsUndefined() || sec()->IsSmi() || sec()->IsNaN());
CHECK(cache_stamp()->IsUndefined() ||
cache_stamp()->IsSmi() ||
cache_stamp()->IsNaN());
if (month()->IsSmi()) {
int month = Smi::cast(this->month())->value();
CHECK(0 <= month && month <= 11);
}
if (day()->IsSmi()) {
int day = Smi::cast(this->day())->value();
CHECK(1 <= day && day <= 31);
}
if (hour()->IsSmi()) {
int hour = Smi::cast(this->hour())->value();
CHECK(0 <= hour && hour <= 23);
}
if (min()->IsSmi()) {
int min = Smi::cast(this->min())->value();
CHECK(0 <= min && min <= 59);
}
if (sec()->IsSmi()) {
int sec = Smi::cast(this->sec())->value();
CHECK(0 <= sec && sec <= 59);
}
if (weekday()->IsSmi()) {
int weekday = Smi::cast(this->weekday())->value();
CHECK(0 <= weekday && weekday <= 6);
}
if (cache_stamp()->IsSmi()) {
CHECK(Smi::cast(cache_stamp())->value() <=
Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
}
}
void JSMessageObject::JSMessageObjectVerify() {
CHECK(IsJSMessageObject());
CHECK(type()->IsString());

29
deps/v8/src/objects-inl.h

@ -605,6 +605,7 @@ TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
@ -800,6 +801,11 @@ double Object::Number() {
}
bool Object::IsNaN() {
return this->IsHeapNumber() && isnan(HeapNumber::cast(this)->value());
}
MaybeObject* Object::ToSmi() {
if (IsSmi()) return this;
if (IsHeapNumber()) {
@ -1425,6 +1431,8 @@ int JSObject::GetHeaderSize() {
return JSFunction::kSize;
case JS_VALUE_TYPE:
return JSValue::kSize;
case JS_DATE_TYPE:
return JSDate::kSize;
case JS_ARRAY_TYPE:
return JSArray::kSize;
case JS_WEAK_MAP_TYPE:
@ -1988,7 +1996,8 @@ AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
bool DescriptorArray::IsProperty(int descriptor_number) {
return IsRealProperty(GetType(descriptor_number));
Entry entry(this, descriptor_number);
return IsPropertyDescriptor(&entry);
}
@ -4118,6 +4127,24 @@ JSValue* JSValue::cast(Object* obj) {
}
ACCESSORS(JSDate, value, Object, kValueOffset)
ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
ACCESSORS(JSDate, year, Object, kYearOffset)
ACCESSORS(JSDate, month, Object, kMonthOffset)
ACCESSORS(JSDate, day, Object, kDayOffset)
ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
ACCESSORS(JSDate, hour, Object, kHourOffset)
ACCESSORS(JSDate, min, Object, kMinOffset)
ACCESSORS(JSDate, sec, Object, kSecOffset)
JSDate* JSDate::cast(Object* obj) {
ASSERT(obj->IsJSDate());
ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize);
return reinterpret_cast<JSDate*>(obj);
}
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)

27
deps/v8/src/objects-printer.cc

@ -151,6 +151,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
PrintF(out, "Value wrapper around:");
JSValue::cast(this)->value()->Print(out);
break;
case JS_DATE_TYPE:
JSDate::cast(this)->value()->Print(out);
break;
case CODE_TYPE:
Code::cast(this)->CodePrint(out);
break;
@ -660,6 +663,30 @@ char* String::ToAsciiArray() {
}
static const char* const weekdays[] = {
"???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
};
void JSDate::JSDatePrint(FILE* out) {
HeapObject::PrintHeader(out, "JSDate");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
PrintF(out, " - value = ");
value()->Print(out);
if (!year()->IsSmi()) {
PrintF(out, " - time = NaN\n");
} else {
PrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
year()->IsSmi() ? Smi::cast(year())->value() : -1,
month()->IsSmi() ? Smi::cast(month())->value() : -1,
day()->IsSmi() ? Smi::cast(day())->value() : -1,
hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
min()->IsSmi() ? Smi::cast(min())->value() : -1,
sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
}
}
void JSProxy::JSProxyPrint(FILE* out) {
HeapObject::PrintHeader(out, "JSProxy");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));

Some files were not shown because too many files changed in this diff
