
Upgrade V8 to 2.5.1

v0.7.4-release
Ryan Dahl, 14 years ago
commit 3b861db31d
  1. deps/v8/ChangeLog (17)
  2. deps/v8/SConstruct (6)
  3. deps/v8/src/SConscript (1)
  4. deps/v8/src/arm/code-stubs-arm.cc (15)
  5. deps/v8/src/arm/regexp-macro-assembler-arm.cc (14)
  6. deps/v8/src/arm/regexp-macro-assembler-arm.h (1)
  7. deps/v8/src/ast.cc (49)
  8. deps/v8/src/ast.h (17)
  9. deps/v8/src/bootstrapper.cc (5)
  10. deps/v8/src/bytecodes-irregexp.h (3)
  11. deps/v8/src/contexts.h (21)
  12. deps/v8/src/cpu-profiler.cc (44)
  13. deps/v8/src/cpu-profiler.h (8)
  14. deps/v8/src/flag-definitions.h (1)
  15. deps/v8/src/heap-inl.h (11)
  16. deps/v8/src/heap.cc (122)
  17. deps/v8/src/heap.h (35)
  18. deps/v8/src/ia32/code-stubs-ia32.cc (13)
  19. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (12)
  20. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (1)
  21. deps/v8/src/interpreter-irregexp.cc (9)
  22. deps/v8/src/jsregexp.cc (14)
  23. deps/v8/src/log-utils.cc (16)
  24. deps/v8/src/log-utils.h (4)
  25. deps/v8/src/log.cc (60)
  26. deps/v8/src/log.h (11)
  27. deps/v8/src/mark-compact.cc (35)
  28. deps/v8/src/objects-inl.h (38)
  29. deps/v8/src/objects-visiting.h (6)
  30. deps/v8/src/objects.cc (26)
  31. deps/v8/src/objects.h (16)
  32. deps/v8/src/platform-freebsd.cc (4)
  33. deps/v8/src/platform-linux.cc (33)
  34. deps/v8/src/platform-macos.cc (35)
  35. deps/v8/src/platform-nullos.cc (5)
  36. deps/v8/src/platform-openbsd.cc (4)
  37. deps/v8/src/platform-solaris.cc (9)
  38. deps/v8/src/platform-win32.cc (39)
  39. deps/v8/src/platform.h (24)
  40. deps/v8/src/regexp-macro-assembler-irregexp.cc (6)
  41. deps/v8/src/regexp-macro-assembler-irregexp.h (1)
  42. deps/v8/src/regexp-macro-assembler-tracer.cc (6)
  43. deps/v8/src/regexp-macro-assembler-tracer.h (1)
  44. deps/v8/src/regexp-macro-assembler.h (1)
  45. deps/v8/src/runtime.cc (4)
  46. deps/v8/src/runtime.h (2)
  47. deps/v8/src/serialize.cc (2)
  48. deps/v8/src/spaces-inl.h (9)
  49. deps/v8/src/spaces.cc (8)
  50. deps/v8/src/spaces.h (1)
  51. deps/v8/src/strtod.cc (61)
  52. deps/v8/src/top.cc (3)
  53. deps/v8/src/top.h (16)
  54. deps/v8/src/version.cc (2)
  55. deps/v8/src/vm-state-inl.h (6)
  56. deps/v8/src/vm-state.cc (39)
  57. deps/v8/src/vm-state.h (10)
  58. deps/v8/src/x64/code-stubs-x64.cc (13)
  59. deps/v8/src/x64/macro-assembler-x64.h (8)
  60. deps/v8/src/x64/regexp-macro-assembler-x64.cc (14)
  61. deps/v8/src/x64/regexp-macro-assembler-x64.h (1)
  62. deps/v8/test/cctest/test-alloc.cc (2)
  63. deps/v8/test/cctest/test-api.cc (40)
  64. deps/v8/test/cctest/test-debug.cc (6)
  65. deps/v8/test/cctest/test-decls.cc (2)
  66. deps/v8/test/cctest/test-heap.cc (117)
  67. deps/v8/test/cctest/test-log.cc (2)
  68. deps/v8/test/cctest/test-mark-compact.cc (22)
  69. deps/v8/test/cctest/test-strtod.cc (6)
  70. deps/v8/test/mjsunit/int32-ops.js (227)
  71. deps/v8/test/mjsunit/regexp.js (58)
  72. deps/v8/test/mjsunit/smi-ops.js (15)
  73. deps/v8/tools/gyp/v8.gyp (1)
  74. deps/v8/tools/ll_prof.py (955)
  75. deps/v8/tools/v8.xcodeproj/project.pbxproj (6)
  76. deps/v8/tools/visual_studio/v8_base.vcproj (4)
  77. deps/v8/tools/visual_studio/v8_base_arm.vcproj (4)
  78. deps/v8/tools/visual_studio/v8_base_x64.vcproj (4)

deps/v8/ChangeLog (17)

@@ -1,3 +1,20 @@
+2010-10-20: Version 2.5.1
+
+        Fixed bug causing spurious out of memory exceptions
+        (issue http://crbug.com/54580).
+
+        Fixed compilation error on Solaris platform (issue 901).
+
+        Fixed error in strtod (string to floating point number conversion)
+        due to glibc's use of 80-bit floats in the FPU on 32-bit linux.
+
+        Adjusted randomized allocations of executable memory to have 64k
+        granularity (issue http://crbug.com/56036).
+
+        Supported profiling using kernel perf_events on linux. Added ll_prof
+        script to tools and --ll-prof flag to V8.
+
+
 2010-10-18: Version 2.5.0
 
         Fixed bug in cache handling of lastIndex on global regexps
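
The strtod entry above concerns double rounding: on 32-bit Linux the x87 FPU evaluates double arithmetic in 80-bit registers, so an intermediate product can be rounded twice (once to extended precision, once when spilled to memory). A minimal sketch of the hazard, illustrative only and not V8's actual fix; the helper name is hypothetical:

    // Forcing a value through memory rounds it to a true 64-bit double,
    // defeating the x87's extended-precision intermediate results.
    static double ForceToDouble(double d) {
      volatile double v = d;  // spill + reload rounds to 64 bits
      return v;
    }

Without such a spill, a product like mantissa * power_of_ten can carry 80 bits of precision into a comparison and come out wrong in the last bit after the final rounding to double.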

deps/v8/SConstruct (6)

@@ -664,17 +664,17 @@ SIMPLE_OPTIONS = {
   'toolchain': {
     'values': ['gcc', 'msvc'],
     'default': TOOLCHAIN_GUESS,
-    'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
+    'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
   },
   'os': {
     'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
     'default': OS_GUESS,
-    'help': 'the os to build for (' + OS_GUESS + ')'
+    'help': 'the os to build for (%s)' % OS_GUESS
   },
   'arch': {
     'values':['arm', 'ia32', 'x64', 'mips'],
     'default': ARCH_GUESS,
-    'help': 'the architecture to build for (' + ARCH_GUESS + ')'
+    'help': 'the architecture to build for (%s)' % ARCH_GUESS
   },
   'regexp': {
     'values': ['native', 'interpreted'],

deps/v8/src/SConscript (1)

@@ -116,7 +116,6 @@ SOURCES = {
     variables.cc
     version.cc
     virtual-frame.cc
-    vm-state.cc
     zone.cc
     """),
   'arch:arm': Split("""

deps/v8/src/arm/code-stubs-arm.cc (15)

@@ -935,11 +935,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
     __ orr(r2, r1, r0);
     __ tst(r2, Operand(kSmiTagMask));
     __ b(ne, &not_two_smis);
-    __ sub(r0, r1, r0, SetCC);
-    __ b(vc, &smi_done);
-    // Correct the sign in case of overflow.
-    __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
-    __ bind(&smi_done);
+    __ mov(r1, Operand(r1, ASR, 1));
+    __ sub(r0, r1, Operand(r0, ASR, 1));
     __ Ret();
     __ bind(&not_two_smis);
   } else if (FLAG_debug_code) {

@@ -2300,13 +2297,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  // Do tail-call to runtime routine. Runtime routines expect at least one
-  // argument, so give it a Smi.
-  __ mov(r0, Operand(Smi::FromInt(0)));
-  __ push(r0);
-  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
-  __ Ret();
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
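
The new ARM sequence untags both Smis with an arithmetic shift before subtracting, which is why the old overflow fix-up (the vc branch and the rsb) can go away. A sketch of the reasoning, assuming 32-bit Smis with a 31-bit payload and tag bit 0:

    #include <cstdint>

    // Untag first, then subtract: the difference of two 31-bit signed
    // values always fits in 32 bits, so overflow is impossible.
    int32_t SmiCompareResult(int32_t lhs_smi, int32_t rhs_smi) {
      return (lhs_smi >> 1) - (rhs_smi >> 1);  // mirrors ASR #1, then sub
    }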

deps/v8/src/arm/regexp-macro-assembler-arm.cc (14)

@@ -142,7 +142,6 @@ int RegExpMacroAssemblerARM::stack_limit_slack() {
 void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    Label inside_string;
     __ add(current_input_offset(),
            current_input_offset(), Operand(by * char_size()));
   }

@@ -927,6 +926,19 @@ void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
 }
 
+
+void RegExpMacroAssemblerARM::SetCurrentPositionFromEnd(int by) {
+  Label after_position;
+  __ cmp(current_input_offset(), Operand(-by * char_size()));
+  __ b(ge, &after_position);
+  __ mov(current_input_offset(), Operand(-by * char_size()));
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
 void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
   ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
   __ mov(r0, Operand(to));

deps/v8/src/arm/regexp-macro-assembler-arm.h (1)

@@ -100,6 +100,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
                                StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);

deps/v8/src/ast.cc (49)

@@ -398,39 +398,70 @@ Interval RegExpQuantifier::CaptureRegisters() {
 }
 
-bool RegExpAssertion::IsAnchored() {
+bool RegExpAssertion::IsAnchoredAtStart() {
   return type() == RegExpAssertion::START_OF_INPUT;
 }
 
-bool RegExpAlternative::IsAnchored() {
+bool RegExpAssertion::IsAnchoredAtEnd() {
+  return type() == RegExpAssertion::END_OF_INPUT;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtStart() {
   ZoneList<RegExpTree*>* nodes = this->nodes();
   for (int i = 0; i < nodes->length(); i++) {
     RegExpTree* node = nodes->at(i);
-    if (node->IsAnchored()) { return true; }
+    if (node->IsAnchoredAtStart()) { return true; }
+    if (node->max_match() > 0) { return false; }
+  }
+  return false;
+}
+
+
+bool RegExpAlternative::IsAnchoredAtEnd() {
+  ZoneList<RegExpTree*>* nodes = this->nodes();
+  for (int i = nodes->length() - 1; i >= 0; i--) {
+    RegExpTree* node = nodes->at(i);
+    if (node->IsAnchoredAtEnd()) { return true; }
     if (node->max_match() > 0) { return false; }
   }
   return false;
 }
 
-bool RegExpDisjunction::IsAnchored() {
+bool RegExpDisjunction::IsAnchoredAtStart() {
   ZoneList<RegExpTree*>* alternatives = this->alternatives();
   for (int i = 0; i < alternatives->length(); i++) {
-    if (!alternatives->at(i)->IsAnchored())
+    if (!alternatives->at(i)->IsAnchoredAtStart())
       return false;
   }
   return true;
 }
 
-bool RegExpLookahead::IsAnchored() {
-  return is_positive() && body()->IsAnchored();
+bool RegExpDisjunction::IsAnchoredAtEnd() {
+  ZoneList<RegExpTree*>* alternatives = this->alternatives();
+  for (int i = 0; i < alternatives->length(); i++) {
+    if (!alternatives->at(i)->IsAnchoredAtEnd())
+      return false;
+  }
+  return true;
+}
+
+
+bool RegExpLookahead::IsAnchoredAtStart() {
+  return is_positive() && body()->IsAnchoredAtStart();
+}
+
+
+bool RegExpCapture::IsAnchoredAtStart() {
+  return body()->IsAnchoredAtStart();
 }
 
-bool RegExpCapture::IsAnchored() {
-  return body()->IsAnchored();
+bool RegExpCapture::IsAnchoredAtEnd() {
+  return body()->IsAnchoredAtEnd();
 }
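
The end-anchoring check added above scans an alternative backwards: an END_OF_INPUT assertion anchors it unless a node that can consume characters comes later. A self-contained restatement of that loop, with toy types standing in for the AST:

    #include <vector>

    struct NodeInfo { bool anchored_at_end; int max_match; };

    // /ab$/ is anchored at the end (walking backwards hits the anchor
    // first); /a$b/ is not, because 'b' has max_match > 0.
    static bool AlternativeIsAnchoredAtEnd(const std::vector<NodeInfo>& nodes) {
      for (int i = static_cast<int>(nodes.size()) - 1; i >= 0; i--) {
        if (nodes[i].anchored_at_end) return true;
        if (nodes[i].max_match > 0) return false;
      }
      return false;
    }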

deps/v8/src/ast.h (17)

@@ -1523,7 +1523,8 @@ class RegExpTree: public ZoneObject {
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) = 0;
   virtual bool IsTextElement() { return false; }
-  virtual bool IsAnchored() { return false; }
+  virtual bool IsAnchoredAtStart() { return false; }
+  virtual bool IsAnchoredAtEnd() { return false; }
   virtual int min_match() = 0;
   virtual int max_match() = 0;
   // Returns the interval of registers used for captures within this

@@ -1548,7 +1549,8 @@ class RegExpDisjunction: public RegExpTree {
   virtual RegExpDisjunction* AsDisjunction();
   virtual Interval CaptureRegisters();
   virtual bool IsDisjunction();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual int min_match() { return min_match_; }
   virtual int max_match() { return max_match_; }
   ZoneList<RegExpTree*>* alternatives() { return alternatives_; }

@@ -1568,7 +1570,8 @@ class RegExpAlternative: public RegExpTree {
   virtual RegExpAlternative* AsAlternative();
   virtual Interval CaptureRegisters();
   virtual bool IsAlternative();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual int min_match() { return min_match_; }
   virtual int max_match() { return max_match_; }
   ZoneList<RegExpTree*>* nodes() { return nodes_; }

@@ -1595,7 +1598,8 @@ class RegExpAssertion: public RegExpTree {
                              RegExpNode* on_success);
   virtual RegExpAssertion* AsAssertion();
   virtual bool IsAssertion();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
   Type type() { return type_; }

@@ -1768,7 +1772,8 @@ class RegExpCapture: public RegExpTree {
                              RegExpCompiler* compiler,
                              RegExpNode* on_success);
   virtual RegExpCapture* AsCapture();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
+  virtual bool IsAnchoredAtEnd();
   virtual Interval CaptureRegisters();
   virtual bool IsCapture();
   virtual int min_match() { return body_->min_match(); }

@@ -1800,7 +1805,7 @@ class RegExpLookahead: public RegExpTree {
   virtual RegExpLookahead* AsLookahead();
   virtual Interval CaptureRegisters();
   virtual bool IsLookahead();
-  virtual bool IsAnchored();
+  virtual bool IsAnchoredAtStart();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
   RegExpTree* body() { return body_; }

deps/v8/src/bootstrapper.cc (5)

@@ -1814,6 +1814,11 @@ Genesis::Genesis(Handle<Object> global_object,
     i::Counters::contexts_created_from_scratch.Increment();
   }
 
+  // Add this context to the weak list of global contexts.
+  (*global_context_)->set(Context::NEXT_CONTEXT_LINK,
+                          Heap::global_contexts_list());
+  Heap::set_global_contexts_list(*global_context_);
+
   result_ = global_context_;
 }

deps/v8/src/bytecodes-irregexp.h (3)

@@ -88,7 +88,8 @@ V(CHECK_REGISTER_EQ_POS, 43, 8) /* bc8 reg_idx24 addr32 */ \
 V(CHECK_AT_START, 44, 8) /* bc8 pad24 addr32 */ \
 V(CHECK_NOT_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
 V(CHECK_GREEDY, 46, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */
+V(ADVANCE_CP_AND_GOTO, 47, 8) /* bc8 offset24 addr32 */ \
+V(SET_CURRENT_POSITION_FROM_END, 48, 4) /* bc8 idx24 */
 
 #define DECLARE_BYTECODES(name, code, length) \
   static const int BC_##name = code;
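
The new bytecode is 4 bytes long: opcode in the low byte, operand in the upper 24 bits ("bc8 idx24"). A sketch of the packing, assuming BYTECODE_SHIFT == 8 so that it matches the decode in interpreter-irregexp.cc further down:

    #include <cstdint>

    const uint32_t kBcSetCurrentPositionFromEnd = 48;  // from the table above

    static uint32_t Encode(uint32_t by) {
      return kBcSetCurrentPositionFromEnd | (by << 8);  // bc8 | idx24
    }
    static uint32_t DecodeOperand(uint32_t insn) {
      return insn >> 8;  // mirrors insn >> BYTECODE_SHIFT
    }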

deps/v8/src/contexts.h (21)

@@ -225,7 +225,15 @@ class Context: public FixedArray {
     OUT_OF_MEMORY_INDEX,
     MAP_CACHE_INDEX,
     CONTEXT_DATA_INDEX,
-    GLOBAL_CONTEXT_SLOTS
+
+    // Properties from here are treated as weak references by the full GC.
+    // Scavenge treats them as strong references.
+    NEXT_CONTEXT_LINK,
+
+    // Total number of slots.
+    GLOBAL_CONTEXT_SLOTS,
+
+    FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK
   };
 
   // Direct slot access.

@@ -333,6 +341,17 @@ class Context: public FixedArray {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
   }
 
+  static const int kSize = kHeaderSize + GLOBAL_CONTEXT_SLOTS * kPointerSize;
+
+  // GC support.
+  typedef FixedBodyDescriptor<
+      kHeaderSize, kSize, kSize> ScavengeBodyDescriptor;
+
+  typedef FixedBodyDescriptor<
+      kHeaderSize,
+      kHeaderSize + FIRST_WEAK_SLOT * kPointerSize,
+      kSize> MarkCompactBodyDescriptor;
+
  private:
   // Unchecked access to the slots.
   Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
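
NEXT_CONTEXT_LINK turns the set of global contexts into an intrusive singly linked list whose link slot is strong for the scavenger but weak for the full GC, which is why there are two body descriptors above. A simplified model of the structure, with a plain pointer standing in for the FixedArray slot:

    // Toy stand-in for the context list; V8 stores the link in slot
    // NEXT_CONTEXT_LINK and terminates the list with undefined_value().
    struct GlobalContext {
      GlobalContext* next_context_link;
    };

    template <typename Fn>
    static void ForEachGlobalContext(GlobalContext* head, Fn visit) {
      for (GlobalContext* c = head; c != nullptr; c = c->next_context_link) {
        visit(c);
      }
    }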

deps/v8/src/cpu-profiler.cc (44)

@@ -188,6 +188,20 @@ bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
 }
 
+
+void ProfilerEventsProcessor::ProcessMovedFunctions() {
+  for (int i = 0; i < moved_functions_.length(); ++i) {
+    JSFunction* function = moved_functions_[i];
+    CpuProfiler::FunctionCreateEvent(function);
+  }
+  moved_functions_.Clear();
+}
+
+
+void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
+  moved_functions_.Add(function);
+}
+
 void ProfilerEventsProcessor::RegExpCodeCreateEvent(
     Logger::LogEventsAndTags tag,
     const char* prefix,

@@ -426,8 +440,12 @@ void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
 }
 
-void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
-                                              HeapObject* source) {
+
+void CpuProfiler::ProcessMovedFunctions() {
+  singleton_->processor_->ProcessMovedFunctions();
+}
+
+
+void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
   // This function is called from GC iterators (during Scavenge,
   // MC, and MS), so marking bits can be set on objects. That's
   // why unchecked accessors are used here.

@@ -436,27 +454,7 @@ void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
   if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
       || singleton_->processor_->IsKnownFunction(function->address())) return;
 
-  int security_token_id = TokenEnumerator::kNoSecurityToken;
-  // In debug mode, assertions may fail for contexts,
-  // and we can live without security tokens in debug mode.
-#ifndef DEBUG
-  if (function->unchecked_context()->IsContext()) {
-    security_token_id = singleton_->token_enumerator_->GetTokenId(
-        function->context()->global_context()->security_token());
-  }
-  // Security token may not be moved yet.
-  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
-    JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
-    if (old_function->unchecked_context()->IsContext()) {
-      security_token_id = singleton_->token_enumerator_->GetTokenId(
-          old_function->context()->global_context()->security_token());
-    }
-  }
-#endif
-  singleton_->processor_->FunctionCreateEvent(
-      function->address(),
-      function->unchecked_code()->address(),
-      security_token_id);
+  singleton_->processor_->RememberMovedFunction(function);
 }
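
Moved functions are now merely remembered while the GC runs and replayed once it finishes (Heap::CollectGarbage calls CpuProfiler::ProcessMovedFunctions, as seen in heap.cc below), since looking up security tokens mid-GC is unsafe. The defer-and-replay pattern in isolation, as a sketch with its own toy types:

    #include <vector>

    template <typename T>
    class DeferredEvents {
     public:
      void Remember(const T& item) { items_.push_back(item); }  // during GC
      template <typename Fn>
      void ProcessAll(Fn emit) {                                // after GC
        for (const T& item : items_) emit(item);
        items_.clear();
      }
     private:
      std::vector<T> items_;
    };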

deps/v8/src/cpu-profiler.h (8)

@@ -165,6 +165,8 @@ class ProfilerEventsProcessor : public Thread {
   // Puts current stack into tick sample events buffer.
   void AddCurrentStack();
   bool IsKnownFunction(Address start);
+  void ProcessMovedFunctions();
+  void RememberMovedFunction(JSFunction* function);
 
   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all

@@ -202,6 +204,7 @@ class ProfilerEventsProcessor : public Thread {
   // Used from the VM thread.
   HashMap* known_functions_;
+  List<JSFunction*> moved_functions_;
 };
 
 } }  // namespace v8::internal

@@ -251,17 +254,18 @@ class CpuProfiler {
                               String* source, int line);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
+  static void CodeMovingGCEvent() {}
   static void CodeMoveEvent(Address from, Address to);
   static void CodeDeleteEvent(Address from);
   static void FunctionCreateEvent(JSFunction* function);
   // Reports function creation in case we had missed it (e.g.
   // if it was created from compiled code).
-  static void FunctionCreateEventFromMove(JSFunction* function,
-                                          HeapObject* source);
+  static void FunctionCreateEventFromMove(JSFunction* function);
   static void FunctionMoveEvent(Address from, Address to);
   static void FunctionDeleteEvent(Address from);
   static void GetterCallbackEvent(String* name, Address entry_point);
   static void RegExpCodeCreateEvent(Code* code, String* source);
+  static void ProcessMovedFunctions();
   static void SetterCallbackEvent(String* name, Address entry_point);
 
   static INLINE(bool is_profiling()) {

deps/v8/src/flag-definitions.h (1)

@@ -412,6 +412,7 @@ DEFINE_bool(sliding_state_window, false,
             "Update sliding state window counters.")
 DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
 DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
+DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
 
 //
 // Heap protection flags

deps/v8/src/heap-inl.h (11)

@@ -76,7 +76,7 @@ Object* Heap::AllocateRaw(int size_in_bytes,
   if (FLAG_gc_interval >= 0 &&
       !disallow_allocation_failure_ &&
       Heap::allocation_timeout_-- <= 0) {
-    return Failure::RetryAfterGC(size_in_bytes, space);
+    return Failure::RetryAfterGC(space);
   }
   Counters::objs_since_last_full.Increment();
   Counters::objs_since_last_young.Increment();

@@ -389,8 +389,12 @@ void Heap::SetLastScriptId(Object* last_script_id) {
 }
 
+#ifdef DEBUG
 #define GC_GREEDY_CHECK() \
-  ASSERT(!FLAG_gc_greedy || v8::internal::Heap::GarbageCollectionGreedyCheck())
+  if (FLAG_gc_greedy) v8::internal::Heap::GarbageCollectionGreedyCheck()
+#else
+#define GC_GREEDY_CHECK() { }
+#endif
 
 // Calls the FUNCTION_CALL function and retries it up to three times

@@ -409,8 +413,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true); \
     } \
     if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
-    Heap::CollectGarbage(Failure::cast(__object__)->requested(), \
-                         Failure::cast(__object__)->allocation_space()); \
+    Heap::CollectGarbage(Failure::cast(__object__)->allocation_space()); \
     __object__ = FUNCTION_CALL; \
     if (!__object__->IsFailure()) RETURN_VALUE; \
    if (__object__->IsOutOfMemoryFailure()) { \
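
After this change a retry failure carries only the space that was exhausted, and CollectGarbage no longer reports whether enough room was freed; the macro simply collects that space and retries. The control flow in isolation, sketched with toy types rather than the real macro:

    enum class Space { kNew, kOldPointer, kOldData, kCode, kMap, kCell, kLarge };
    struct AllocResult { bool retry_after_gc; Space space; };

    template <typename Alloc, typename Collect>
    AllocResult AllocateWithRetry(Alloc try_alloc, Collect collect_garbage) {
      AllocResult r = try_alloc();
      for (int attempt = 0; r.retry_after_gc && attempt < 2; ++attempt) {
        collect_garbage(r.space);  // no byte count is passed any more
        r = try_alloc();
      }
      return r;  // still failing => caller escalates, ultimately to fatal OOM
    }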

deps/v8/src/heap.cc (122)

@@ -54,6 +54,7 @@ namespace internal {
 
 String* Heap::hidden_symbol_;
 Object* Heap::roots_[Heap::kRootListLength];
+Object* Heap::global_contexts_list_;
 
 NewSpace Heap::new_space_;
 OldSpace* Heap::old_pointer_space_ = NULL;

@@ -420,7 +421,7 @@ void Heap::CollectAllGarbage(bool force_compaction,
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   MarkCompactCollector::SetForceCompaction(force_compaction);
-  CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
+  CollectGarbage(OLD_POINTER_SPACE, collectionPolicy);
   MarkCompactCollector::SetForceCompaction(false);
 }

@@ -431,8 +432,7 @@ void Heap::CollectAllAvailableGarbage() {
 }
 
-bool Heap::CollectGarbage(int requested_size,
-                          AllocationSpace space,
+void Heap::CollectGarbage(AllocationSpace space,
                           CollectionPolicy collectionPolicy) {
   // The VM is in the GC state until exiting this function.
   VMState state(GC);

@@ -469,25 +469,8 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_gc) HeapProfiler::WriteSample();
+  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
 #endif
-
-  switch (space) {
-    case NEW_SPACE:
-      return new_space_.Available() >= requested_size;
-    case OLD_POINTER_SPACE:
-      return old_pointer_space_->Available() >= requested_size;
-    case OLD_DATA_SPACE:
-      return old_data_space_->Available() >= requested_size;
-    case CODE_SPACE:
-      return code_space_->Available() >= requested_size;
-    case MAP_SPACE:
-      return map_space_->Available() >= requested_size;
-    case CELL_SPACE:
-      return cell_space_->Available() >= requested_size;
-    case LO_SPACE:
-      return lo_space_->Available() >= requested_size;
-  }
-  return false;
 }

@@ -542,27 +525,27 @@ void Heap::ReserveSpace(
   while (gc_performed) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(new_space_size, NEW_SPACE);
+      Heap::CollectGarbage(NEW_SPACE);
       gc_performed = true;
     }
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
-      Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
-      Heap::CollectGarbage(code_space_size, CODE_SPACE);
+      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
-      Heap::CollectGarbage(map_space_size, MAP_SPACE);
+      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
-      Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of

@@ -574,7 +557,7 @@ void Heap::ReserveSpace(
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
     if (!(lo_space->ReserveSpace(large_object_size))) {
-      Heap::CollectGarbage(large_object_size, LO_SPACE);
+      Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
   }

@@ -624,19 +607,14 @@ void Heap::ClearJSFunctionResultCaches() {
 }
 
-class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor {
-  virtual void VisitThread(ThreadLocalTop* top) {
-    Context* context = top->context_;
-    if (context == NULL) return;
-    context->global()->global_context()->normalized_map_cache()->Clear();
-  }
-};
-
 void Heap::ClearNormalizedMapCaches() {
   if (Bootstrapper::IsActive()) return;
-  ClearThreadNormalizedMapCachesVisitor visitor;
-  ThreadManager::IterateArchivedThreads(&visitor);
+
+  Object* context = global_contexts_list_;
+  while (!context->IsUndefined()) {
+    Context::cast(context)->normalized_map_cache()->Clear();
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
 }

@@ -685,6 +663,10 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
 void Heap::PerformGarbageCollection(GarbageCollector collector,
                                     GCTracer* tracer,
                                     CollectionPolicy collectionPolicy) {
+  if (collector != SCAVENGER) {
+    PROFILE(CodeMovingGCEvent());
+  }
+
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);

@@ -1034,6 +1016,9 @@ void Heap::Scavenge() {
     }
   }
 
+  // Scavenge object reachable from the global contexts list directly.
+  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
+
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
   UpdateNewSpaceReferencesInExternalStringTable(

@@ -1101,6 +1086,44 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
 }
 
+
+void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
+  Object* head = undefined_value();
+  Context* tail = NULL;
+  Object* candidate = global_contexts_list_;
+  while (!candidate->IsUndefined()) {
+    // Check whether to keep the candidate in the list.
+    Context* candidate_context = reinterpret_cast<Context*>(candidate);
+    Object* retain = retainer->RetainAs(candidate);
+    if (retain != NULL) {
+      if (head->IsUndefined()) {
+        // First element in the list.
+        head = candidate_context;
+      } else {
+        // Subsequent elements in the list.
+        ASSERT(tail != NULL);
+        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+                            candidate_context,
+                            UPDATE_WRITE_BARRIER);
+      }
+      // Retained context is new tail.
+      tail = candidate_context;
+    }
+    // Move to next element in the list.
+    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
+  }
+
+  // Terminate the list if there is one or more elements.
+  if (tail != NULL) {
+    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
+                        Heap::undefined_value(),
+                        UPDATE_WRITE_BARRIER);
+  }
+
+  // Update the head of the list of contexts.
+  Heap::global_contexts_list_ = head;
+}
+
+
 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
  public:
   static inline void VisitPointer(Object** p) {

@@ -1157,6 +1180,9 @@ class ScavengingVisitor : public StaticVisitorBase {
     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
     table_.Register(kVisitByteArray, &EvacuateByteArray);
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+    table_.Register(kVisitGlobalContext,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                        VisitSpecialized<Context::kSize>);
 
     typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;

@@ -1235,7 +1261,7 @@ class ScavengingVisitor : public StaticVisitorBase {
     if (Logger::is_logging() || CpuProfiler::is_profiling()) {
       if (target->IsJSFunction()) {
         PROFILE(FunctionMoveEvent(source->address(), target->address()));
-        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target), source));
+        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
       }
     }
 #endif

@@ -1647,7 +1673,9 @@ bool Heap::CreateInitialMaps() {
   obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
   if (obj->IsFailure()) return false;
-  set_global_context_map(Map::cast(obj));
+  Map* global_context_map = Map::cast(obj);
+  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
+  set_global_context_map(global_context_map);
 
   obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                     SharedFunctionInfo::kAlignedSize);

@@ -3431,7 +3459,7 @@ bool Heap::IdleNotification() {
       HistogramTimerScope scope(&Counters::gc_context);
       CollectAllGarbage(false);
     } else {
-      CollectGarbage(0, NEW_SPACE);
+      CollectGarbage(NEW_SPACE);
     }
     new_space_.Shrink();
     last_gc_count = gc_count_;

@@ -4236,6 +4264,8 @@ bool Heap::Setup(bool create_heap_objects) {
     // Create initial objects
     if (!CreateInitialObjects()) return false;
+
+    global_contexts_list_ = undefined_value();
   }
 
   LOG(IntPtrTEvent("heap-capacity", Capacity()));

@@ -4937,11 +4967,11 @@ int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
 
 #ifdef DEBUG
-bool Heap::GarbageCollectionGreedyCheck() {
+void Heap::GarbageCollectionGreedyCheck() {
   ASSERT(FLAG_gc_greedy);
-  if (Bootstrapper::IsActive()) return true;
-  if (disallow_allocation_failure()) return true;
-  return CollectGarbage(0, NEW_SPACE);
+  if (Bootstrapper::IsActive()) return;
+  if (disallow_allocation_failure()) return;
+  CollectGarbage(NEW_SPACE);
 }
 #endif
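
ProcessWeakReferences above is an in-place filter over the context list: every node the retainer keeps is spliced onto the tail of the surviving list, and the tail is re-terminated at the end. The same algorithm on a plain singly linked list, for clarity:

    // Self-contained version of the pruning loop; V8 additionally routes
    // link updates through set_unchecked with a write barrier.
    struct Node { Node* next; bool retained; };

    static Node* PruneList(Node* head) {
      Node* new_head = nullptr;
      Node* tail = nullptr;
      for (Node* n = head; n != nullptr; n = n->next) {
        if (!n->retained) continue;          // RetainAs returned NULL: drop it
        if (tail == nullptr) new_head = n;   // first survivor becomes head
        else tail->next = n;                 // splice after previous survivor
        tail = n;
      }
      if (tail != nullptr) tail->next = nullptr;  // terminate the list
      return new_head;
    }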

deps/v8/src/heap.h (35)

@@ -202,9 +202,10 @@ namespace internal {
   V(closure_symbol, "(closure)")
 
-// Forward declaration of the GCTracer class.
+// Forward declarations.
 class GCTracer;
 class HeapStats;
+class WeakObjectRetainer;
 
 typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);

@@ -696,8 +697,7 @@ class Heap : public AllStatic {
   // Performs garbage collection operation.
-  // Returns whether required_space bytes are available after the collection.
-  static bool CollectGarbage(int required_space,
-                             AllocationSpace space,
+  static void CollectGarbage(AllocationSpace space,
                              CollectionPolicy collectionPolicy = NORMAL);

@@ -717,7 +717,7 @@ class Heap : public AllStatic {
 #ifdef DEBUG
   // Utility used with flag gc-greedy.
-  static bool GarbageCollectionGreedyCheck();
+  static void GarbageCollectionGreedyCheck();
 #endif
 
   static void AddGCPrologueCallback(

@@ -767,6 +767,11 @@ class Heap : public AllStatic {
   // not match the empty string.
   static String* hidden_symbol() { return hidden_symbol_; }
 
+  static void set_global_contexts_list(Object* object) {
+    global_contexts_list_ = object;
+  }
+  static Object* global_contexts_list() { return global_contexts_list_; }
+
   // Iterates over all roots in the heap.
   static void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.

@@ -870,6 +875,11 @@ class Heap : public AllStatic {
   // Generated code can embed this address to get access to the roots.
   static Object** roots_address() { return roots_; }
 
+  // Get address of global contexts list for serialization support.
+  static Object** global_contexts_list_address() {
+    return &global_contexts_list_;
+  }
+
 #ifdef DEBUG
   static void Print();
   static void PrintHandles();

@@ -1051,6 +1061,8 @@ class Heap : public AllStatic {
   static void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
+  static void ProcessWeakReferences(WeakObjectRetainer* retainer);
+
   // Helper function that governs the promotion policy from new space to
   // old. If the object's old address lies below the new space's age
   // mark or if we've already filled the bottom 1/16th of the to space,

@@ -1157,6 +1169,8 @@ class Heap : public AllStatic {
   static Object* roots_[kRootListLength];
 
+  static Object* global_contexts_list_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;

@@ -2043,6 +2057,19 @@ class ExternalStringTable : public AllStatic {
   static List<Object*> old_space_strings_;
 };
 
+
+// Abstract base class for checking whether a weak object should be retained.
+class WeakObjectRetainer {
+ public:
+  virtual ~WeakObjectRetainer() {}
+
+  // Return whether this object should be retained. If NULL is returned the
+  // object has no references. Otherwise the address of the retained object
+  // should be returned as in some GC situations the object has been moved.
+  virtual Object* RetainAs(Object* object) = 0;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_H_

deps/v8/src/ia32/code-stubs-ia32.cc (13)

@@ -2638,7 +2638,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
     __ j(not_zero, &non_smi, not_taken);
     __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done);
-    __ neg(edx);  // Correct sign in case of overflow.
+    __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
     __ mov(eax, edx);
     __ ret(0);

@@ -2964,16 +2964,7 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  // Because builtins always remove the receiver from the stack, we
-  // have to fake one to avoid underflowing the stack. The receiver
-  // must be inserted below the return address on the stack so we
-  // temporarily store that in a register.
-  __ pop(eax);
-  __ push(Immediate(Smi::FromInt(0)));
-  __ push(eax);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }

deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (12)

@@ -133,7 +133,6 @@ int RegExpMacroAssemblerIA32::stack_limit_slack() {
 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    Label inside_string;
     __ add(Operand(edi), Immediate(by * char_size()));
   }
 }

@@ -964,6 +963,17 @@ void RegExpMacroAssemblerIA32::ReadStackPointerFromRegister(int reg) {
   __ add(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
 }
 
+void RegExpMacroAssemblerIA32::SetCurrentPositionFromEnd(int by) {
+  NearLabel after_position;
+  __ cmp(edi, -by * char_size());
+  __ j(greater_equal, &after_position);
+  __ mov(edi, -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
 
 void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
   ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!

deps/v8/src/ia32/regexp-macro-assembler-ia32.h (1)

@@ -98,6 +98,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
                                StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);

deps/v8/src/interpreter-irregexp.cc (9)

@@ -607,6 +607,15 @@ static bool RawMatch(const byte* code_base,
           pc = code_base + Load32Aligned(pc + 4);
         }
         break;
+      BYTECODE(SET_CURRENT_POSITION_FROM_END) {
+        int by = static_cast<uint32_t>(insn) >> BYTECODE_SHIFT;
+        if (subject.length() - current > by) {
+          current = subject.length() - by;
+          current_char = subject[current - 1];
+        }
+        pc += BC_SET_CURRENT_POSITION_FROM_END_LENGTH;
+        break;
+      }
       default:
         UNREACHABLE();
         break;
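
The bytecode moves the position only when more than `by` characters lie ahead, clamping the scan start to length - by and pre-loading the preceding character, the same contract as the native SetCurrentPositionFromEnd implementations earlier in this commit. Restated on its own with toy types:

    // Standalone restatement of the clamp; positions are subject indices.
    struct MatchCursor { int current; int current_char; };

    static void SetCurrentPositionFromEnd(MatchCursor* c, const char* subject,
                                          int length, int by) {
      if (length - c->current > by) {  // more than `by` chars still ahead
        c->current = length - by;      // a match of <= by chars can still start here
        c->current_char = subject[c->current - 1];
      }
    }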

deps/v8/src/jsregexp.cc (14)

@@ -5180,7 +5180,10 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
                               &compiler,
                               compiler.accept());
   RegExpNode* node = captured_body;
-  if (!data->tree->IsAnchored()) {
+  bool is_end_anchored = data->tree->IsAnchoredAtEnd();
+  bool is_start_anchored = data->tree->IsAnchoredAtStart();
+  int max_length = data->tree->max_match();
+  if (!is_start_anchored) {
     // Add a .*? at the beginning, outside the body capture, unless
     // this expression is anchored at the beginning.
     RegExpNode* loop_node =

@@ -5236,6 +5239,15 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
   RegExpMacroAssemblerIrregexp macro_assembler(codes);
 #endif  // V8_INTERPRETED_REGEXP
 
+  // Inserted here, instead of in Assembler, because it depends on information
+  // in the AST that isn't replicated in the Node structure.
+  static const int kMaxBacksearchLimit = 1024;
+  if (is_end_anchored &&
+      !is_start_anchored &&
+      max_length < kMaxBacksearchLimit) {
+    macro_assembler.SetCurrentPositionFromEnd(max_length);
+  }
+
   return compiler.Assemble(&macro_assembler,
                            node,
                            data->capture_count,
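
The guard fires for patterns that are anchored at the end but not at the start and whose longest possible match is bounded, e.g. /foo$/: a match can only begin within the last max_match characters, so the engine may skip straight there. The start-position arithmetic, as a sketch:

    // For an end-anchored pattern, the earliest position worth trying is
    // at most max_match characters before the end of the subject.
    static int FirstCandidatePosition(int subject_length, int max_match) {
      return subject_length > max_match ? subject_length - max_match : 0;
    }
    // E.g. /foo$/ (max_match == 3) on a megabyte of text starts matching
    // three characters from the end instead of scanning from position 0.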

deps/v8/src/log-utils.cc (16)

@@ -122,6 +122,7 @@ int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
 bool Log::is_stopped_ = false;
 Log::WritePtr Log::Write = NULL;
 FILE* Log::output_handle_ = NULL;
+FILE* Log::output_code_handle_ = NULL;
 LogDynamicBuffer* Log::output_buffer_ = NULL;
 // Must be the same message as in Logger::PauseProfiler.
 const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";

@@ -143,9 +144,22 @@ void Log::OpenStdout() {
 }
 
+
+static const char kCodeLogExt[] = ".code";
+
+
 void Log::OpenFile(const char* name) {
   ASSERT(!IsEnabled());
   output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+  if (FLAG_ll_prof) {
+    // Open a file for logging the contents of code objects so that
+    // they can be disassembled later.
+    size_t name_len = strlen(name);
+    ScopedVector<char> code_name(
+        static_cast<int>(name_len + sizeof(kCodeLogExt)));
+    memcpy(code_name.start(), name, name_len);
+    memcpy(code_name.start() + name_len, kCodeLogExt, sizeof(kCodeLogExt));
+    output_code_handle_ = OS::FOpen(code_name.start(), OS::LogFileOpenMode);
+  }
   Write = WriteToFile;
   Init();
 }

@@ -165,6 +179,8 @@ void Log::Close() {
   if (Write == WriteToFile) {
     if (output_handle_ != NULL) fclose(output_handle_);
     output_handle_ = NULL;
+    if (output_code_handle_ != NULL) fclose(output_code_handle_);
+    output_code_handle_ = NULL;
   } else if (Write == WriteToMemory) {
     delete output_buffer_;
     output_buffer_ = NULL;

deps/v8/src/log-utils.h (4)

@@ -152,6 +152,9 @@ class Log : public AllStatic {
   // mutex_ should be acquired before using output_handle_ or output_buffer_.
   static FILE* output_handle_;
 
+  // Used when low-level profiling is active to save code object contents.
+  static FILE* output_code_handle_;
+
   static LogDynamicBuffer* output_buffer_;
 
   // Size of dynamic buffer block (and dynamic buffer initial size).

@@ -171,6 +174,7 @@ class Log : public AllStatic {
   // mutex_ should be acquired before using it.
   static char* message_buffer_;
 
+  friend class Logger;
   friend class LogMessageBuilder;
   friend class LogRecordCompressor;
 };

deps/v8/src/log.cc (60)

@@ -191,11 +191,12 @@ class Ticker: public Sampler {
   ~Ticker() { if (IsActive()) Stop(); }
 
-  void SampleStack(TickSample* sample) {
+  virtual void SampleStack(TickSample* sample) {
+    ASSERT(IsSynchronous());
     StackTracer::Trace(sample);
   }
 
-  void Tick(TickSample* sample) {
+  virtual void Tick(TickSample* sample) {
     if (profiler_) profiler_->Insert(sample);
     if (window_) window_->AddState(sample->state);
   }

@@ -765,6 +766,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
     msg.Append(*p);
   }
   msg.Append('"');
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;

@@ -784,6 +786,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;

@@ -808,6 +811,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"%s %s:%d\"",
              code->ExecutableSize(), *str, *sourcestr, line);
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;

@@ -825,6 +829,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
   msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;

@@ -835,6 +840,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
 }
 
+
+void Logger::CodeMovingGCEvent() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+  LogMessageBuilder msg;
+  msg.Append("%s\n", log_events_[CODE_MOVING_GC]);
+  msg.WriteToLogFile();
+  OS::SignalCodeMovingGC();
+#endif
+}
+
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;

@@ -845,6 +861,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
   msg.Append(",%d,\"", code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append('\"');
+  LowLevelCodeCreateEvent(code, &msg);
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;

@@ -909,8 +926,7 @@ void Logger::FunctionCreateEvent(JSFunction* function) {
 }
 
-void Logger::FunctionCreateEventFromMove(JSFunction* function,
-                                         HeapObject*) {
+void Logger::FunctionCreateEventFromMove(JSFunction* function) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
     FunctionCreateEvent(function);

@@ -1340,6 +1356,34 @@ void Logger::LogCodeObject(Object* object) {
 }
 
+
+void Logger::LogCodeInfo() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
+#if V8_TARGET_ARCH_IA32
+  const char arch[] = "ia32";
+#elif V8_TARGET_ARCH_X64
+  const char arch[] = "x64";
+#elif V8_TARGET_ARCH_ARM
+  const char arch[] = "arm";
+#else
+  const char arch[] = "unknown";
+#endif
+  LogMessageBuilder msg;
+  msg.Append("code-info,%s,%d\n", arch, Code::kHeaderSize);
+  msg.WriteToLogFile();
+#endif  // ENABLE_LOGGING_AND_PROFILING
+}
+
+
+void Logger::LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg) {
+  if (!FLAG_ll_prof || Log::output_code_handle_ == NULL) return;
+  int pos = static_cast<int>(ftell(Log::output_code_handle_));
+  fwrite(code->instruction_start(), 1, code->instruction_size(),
+         Log::output_code_handle_);
+  msg->Append(",%d", pos);
+}
+
+
 void Logger::LogCodeObjects() {
   AssertNoAllocation no_alloc;
   HeapIterator iterator;

@@ -1451,6 +1495,12 @@ bool Logger::Setup() {
   // --prof implies --log-code.
   if (FLAG_prof) FLAG_log_code = true;
 
+  // --ll-prof implies --log-code and --log-snapshot-positions.
+  if (FLAG_ll_prof) {
+    FLAG_log_code = true;
+    FLAG_log_snapshot_positions = true;
+  }
+
   // --prof_lazy controls --log-code, implies --noprof_auto.
   if (FLAG_prof_lazy) {
     FLAG_log_code = false;

@@ -1512,6 +1562,8 @@ bool Logger::Setup() {
   ASSERT(VMState::is_outermost_external());
 
+  if (FLAG_ll_prof) LogCodeInfo();
+
   ticker_ = new Ticker(kSamplingIntervalMs);
 
   if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
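
With --ll-prof, each code-creation line now ends with the byte offset at which the code object's instructions were appended to the companion ".code" file; the shipped consumer is tools/ll_prof.py. A hypothetical C++ reader of that side file, just to show the contract (names are illustrative):

    #include <cstdio>
    #include <vector>

    // Seek to the offset recorded at the end of a code-creation log line
    // and read that code object's `size` instruction bytes.
    static std::vector<unsigned char> ReadCodeBlob(FILE* code_file,
                                                   long offset, int size) {
      std::vector<unsigned char> bytes(static_cast<size_t>(size));
      if (fseek(code_file, offset, SEEK_SET) != 0) return {};
      size_t n = fread(bytes.data(), 1, bytes.size(), code_file);
      bytes.resize(n);
      return bytes;
    }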

deps/v8/src/log.h (11)

@@ -91,6 +91,7 @@ class CompressionHelper;
   V(CODE_CREATION_EVENT, "code-creation", "cc") \
   V(CODE_MOVE_EVENT, "code-move", "cm") \
   V(CODE_DELETE_EVENT, "code-delete", "cd") \
+  V(CODE_MOVING_GC, "code-moving-gc", "cg") \
   V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
   V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
   V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \

@@ -209,6 +210,7 @@ class Logger {
   static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
                               String* source, int line);
   static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
+  static void CodeMovingGCEvent();
   // Emits a code create event for a RegExp.
   static void RegExpCodeCreateEvent(Code* code, String* source);
   // Emits a code move event.

@@ -217,8 +219,7 @@ class Logger {
   static void CodeDeleteEvent(Address from);
   // Emits a function object create event.
   static void FunctionCreateEvent(JSFunction* function);
-  static void FunctionCreateEventFromMove(JSFunction* function,
-                                          HeapObject*);
+  static void FunctionCreateEventFromMove(JSFunction* function);
   // Emits a function move event.
   static void FunctionMoveEvent(Address from, Address to);
   // Emits a function delete event.

@@ -317,6 +318,12 @@ class Logger {
   // Used for logging stubs found in the snapshot.
   static void LogCodeObject(Object* code_object);
 
+  // Emits general information about generated code.
+  static void LogCodeInfo();
+
+  // Handles code creation when low-level profiling is active.
+  static void LowLevelCodeCreateEvent(Code* code, LogMessageBuilder* msg);
+
   // Emits a profiler tick event. Used by the profiler thread.
   static void TickEvent(TickSample* sample, bool overflow);

35
deps/v8/src/mark-compact.cc

@@ -282,6 +282,11 @@ class StaticMarkingVisitor : public StaticVisitorBase {
                     FixedArray::BodyDescriptor,
                     void>::Visit);
 
+    table_.Register(kVisitGlobalContext,
+                    &FixedBodyVisitor<StaticMarkingVisitor,
+                                      Context::MarkCompactBodyDescriptor,
+                                      void>::Visit);
+
     table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
 
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
@@ -578,6 +583,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     VisitPointers(SLOT_ADDR(object,
                             JSFunction::kCodeEntryOffset + kPointerSize),
                   SLOT_ADDR(object, JSFunction::kSize));
+
 #undef SLOT_ADDR
   }
 
@@ -738,6 +744,21 @@ class SymbolTableCleaner : public ObjectVisitor {
 };
 
 
+// Implementation of WeakObjectRetainer for mark compact GCs. All marked
+// objects are retained.
+class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    MapWord first_word = HeapObject::cast(object)->map_word();
+    if (first_word.IsMarked()) {
+      return object;
+    } else {
+      return NULL;
+    }
+  }
+};
+
+
 void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
   ASSERT(!object->IsMarked());
   ASSERT(Heap::Contains(object));
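Heap::ProcessWeakReferences is not shown in this diff, but the retainer contract above implies a standard prune-the-weak-list walk. A self-contained sketch of that pattern (plain structs, not V8 types):

#include <cstddef>

// Sketch: prune a singly linked weak list with a RetainAs-style callback;
// nodes for which the callback returns NULL drop out of the list.
struct Node { bool marked; Node* next; };

static Node* RetainAs(Node* n) { return n->marked ? n : NULL; }

Node* PruneWeakList(Node* head) {
  Node* new_head = NULL;
  Node** tail = &new_head;
  for (Node* cur = head; cur != NULL; cur = cur->next) {
    Node* kept = RetainAs(cur);
    if (kept != NULL) {  // keep marked nodes, relink around dead ones
      *tail = kept;
      tail = &kept->next;
    }
  }
  *tail = NULL;
  return new_head;
}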
@@ -1069,6 +1090,10 @@ void MarkCompactCollector::MarkLiveObjects() {
   ExternalStringTable::Iterate(&v);
   ExternalStringTable::CleanUp();
 
+  // Process the weak references.
+  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+  Heap::ProcessWeakReferences(&mark_compact_object_retainer);
+
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
 }
@@ -1639,6 +1664,9 @@ static void SweepNewSpace(NewSpace* space) {
     }
   }
 
+  // Update pointer from the global contexts list.
+  updating_visitor.VisitPointer(Heap::global_contexts_list_address());
+
   // Update pointers from external string table.
   Heap::UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -2245,6 +2273,9 @@ void MarkCompactCollector::UpdatePointers() {
   Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
   GlobalHandles::IterateWeakRoots(&updating_visitor);
 
+  // Update the pointer to the head of the weak list of global contexts.
+  updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+
   int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                           &UpdatePointersInOldObject);
   int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
@@ -2522,7 +2553,7 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
-    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@@ -2615,7 +2646,7 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
-    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

38
deps/v8/src/objects-inl.h

@@ -844,15 +844,6 @@ bool Failure::IsOutOfMemoryException() const {
 }
 
 
-int Failure::requested() const {
-  const int kShiftBits =
-      kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
-  STATIC_ASSERT(kShiftBits >= 0);
-  ASSERT(type() == RETRY_AFTER_GC);
-  return static_cast<int>(value() >> kShiftBits);
-}
-
-
 AllocationSpace Failure::allocation_space() const {
   ASSERT_EQ(RETRY_AFTER_GC, type());
   return static_cast<AllocationSpace>((value() >> kFailureTypeTagSize)
@@ -881,20 +872,14 @@ intptr_t Failure::value() const {
 }
 
 
-Failure* Failure::RetryAfterGC(int requested_bytes) {
-  // Assert that the space encoding fits in the three bytes allotted for it.
-  ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
-  uintptr_t requested =
-      static_cast<uintptr_t>(requested_bytes >> kObjectAlignmentBits);
-  int tag_bits = kSpaceTagSize + kFailureTypeTagSize + kFailureTagSize;
-  if (((requested << tag_bits) >> tag_bits) != requested) {
-    // No room for entire requested size in the bits. Round down to
-    // maximally representable size.
-    requested = static_cast<intptr_t>(
-        (~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
-  }
-  int value = static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE;
-  return Construct(RETRY_AFTER_GC, value);
+Failure* Failure::RetryAfterGC() {
+  return RetryAfterGC(NEW_SPACE);
+}
+
+
+Failure* Failure::RetryAfterGC(AllocationSpace space) {
+  ASSERT((space & ~kSpaceTagMask) == 0);
+  return Construct(RETRY_AFTER_GC, space);
 }
@@ -1485,6 +1470,15 @@ void FixedArray::set_unchecked(int index, Smi* value) {
 }
 
 
+void FixedArray::set_unchecked(int index,
+                               Object* value,
+                               WriteBarrierMode mode) {
+  int offset = kHeaderSize + index * kPointerSize;
+  WRITE_FIELD(this, offset, value);
+  CONDITIONAL_WRITE_BARRIER(this, offset, mode);
+}
+
+
 void FixedArray::set_null_unchecked(int index) {
   ASSERT(index >= 0 && index < this->length());
   ASSERT(!Heap::InNewSpace(Heap::null_value()));
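The new overload is for GC-internal writers that manage remembered sets themselves. A self-contained analog of what the mode parameter buys (the enum names mirror V8's WriteBarrierMode; the Slot type and SetSlot are invented, mirroring only the shape of WRITE_FIELD plus CONDITIONAL_WRITE_BARRIER):

// Invented analog: mutator writes record the slot for the GC, while
// GC-internal writes pass SKIP_WRITE_BARRIER and bypass the bookkeeping.
enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

struct Slot { void* value; bool remembered; };

void SetSlot(Slot* slot, void* value, WriteBarrierMode mode) {
  slot->value = value;            // the raw WRITE_FIELD part
  if (mode == UPDATE_WRITE_BARRIER) {
    slot->remembered = true;      // the conditional barrier part
  }
}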

6
deps/v8/src/objects-visiting.h

@@ -50,6 +50,7 @@ class StaticVisitorBase : public AllStatic {
     kVisitShortcutCandidate,
     kVisitByteArray,
     kVisitFixedArray,
+    kVisitGlobalContext,
 
     // For data objects, JS objects and structs along with generic visitor which
     // can visit object of any size we provide visitors specialized by
@@ -263,6 +264,11 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
                     FixedArray::BodyDescriptor,
                     int>::Visit);
 
+    table_.Register(kVisitGlobalContext,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      Context::ScavengeBodyDescriptor,
+                                      int>::Visit);
+
     table_.Register(kVisitByteArray, &VisitByteArray);
 
     table_.Register(kVisitSharedFunctionInfo,

26
deps/v8/src/objects.cc

@@ -574,28 +574,6 @@ void Failure::FailurePrint() {
 }
 
 
-Failure* Failure::RetryAfterGC(int requested_bytes, AllocationSpace space) {
-  ASSERT((space & ~kSpaceTagMask) == 0);
-  // TODO(X64): Stop using Smi validation for non-smi checks, even if they
-  // happen to be identical at the moment.
-
-  int requested = requested_bytes >> kObjectAlignmentBits;
-  int value = (requested << kSpaceTagSize) | space;
-  // We can't very well allocate a heap number in this situation, and if the
-  // requested memory is so large it seems reasonable to say that this is an
-  // out of memory situation. This fixes a crash in
-  // js1_5/Regress/regress-303213.js.
-  if (value >> kSpaceTagSize != requested ||
-      !Smi::IsValid(value) ||
-      value != ((value << kFailureTypeTagSize) >> kFailureTypeTagSize) ||
-      !Smi::IsValid(value << kFailureTypeTagSize)) {
-    Top::context()->mark_out_of_memory();
-    return Failure::OutOfMemoryException();
-  }
-  return Construct(RETRY_AFTER_GC, value);
-}
-
-
 // Should a word be prefixed by 'a' or 'an' in order to read naturally in
 // English? Returns false for non-ASCII or words that don't start with
 // a capital letter. The a/an rule follows pronunciation in English.
@@ -8591,7 +8569,9 @@ Object* NumberDictionary::Set(uint32_t key,
   details = PropertyDetails(details.attributes(),
                             details.type(),
                             DetailsAt(entry).index());
-  SetEntry(entry, NumberDictionaryShape::AsObject(key), value, details);
+  Object* object_key = NumberDictionaryShape::AsObject(key);
+  if (object_key->IsFailure()) return object_key;
+  SetEntry(entry, object_key, value, details);
   return this;
 }

16
deps/v8/src/objects.h

@@ -794,7 +794,7 @@ class Smi: public Object {
 //
 // Failures are a single word, encoded as follows:
 // +-------------------------+---+--+--+
-// |...rrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// |.........unused..........|sss|tt|11|
 // +-------------------------+---+--+--+
 //                          7 6 4 32 10
 //
@@ -810,11 +810,6 @@ class Smi: public Object {
 // allocation space tag is 000 for all failure types except
 // RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
 // allocation spaces (the encoding is found in globals.h).
-//
-// The remaining bits is the size of the allocation request in units
-// of the pointer size, and is zeroed except for RETRY_AFTER_GC
-// failures. The 25 bits (on a 32 bit platform) gives a representable
-// range of 2^27 bytes (128MB).
 
 // Failure type tag info.
 const int kFailureTypeTagSize = 2;
@@ -836,15 +831,11 @@ class Failure: public Object {
   // Returns the space that needs to be collected for RetryAfterGC failures.
   inline AllocationSpace allocation_space() const;
 
-  // Returns the number of bytes requested (up to the representable maximum)
-  // for RetryAfterGC failures.
-  inline int requested() const;
-
   inline bool IsInternalError() const;
   inline bool IsOutOfMemoryException() const;
 
-  static Failure* RetryAfterGC(int requested_bytes, AllocationSpace space);
-  static inline Failure* RetryAfterGC(int requested_bytes);  // NEW_SPACE
+  static inline Failure* RetryAfterGC(AllocationSpace space);
+  static inline Failure* RetryAfterGC();  // NEW_SPACE
   static inline Failure* Exception();
   static inline Failure* InternalError();
   static inline Failure* OutOfMemoryException();
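With the request size gone, a RETRY_AFTER_GC failure now carries only the space tag. A standalone sketch of the round-trip, using the layout from the comment above (RETRY_AFTER_GC is assumed to be type value 0):

// Low bits 11 tag the word as a failure, then 2 type bits ("tt"), then
// 3 allocation-space bits ("sss"); everything above is now unused.
const unsigned kFailureTagSketch = 3;   // binary 11
const int kFailureTagSizeSketch = 2;
const int kFailureTypeTagSizeSketch = 2;
const unsigned kSpaceTagMaskSketch = 0x7;

unsigned EncodeRetryAfterGC(unsigned space) {
  unsigned value = (space << kFailureTypeTagSizeSketch) | 0;  // type 0 assumed
  return (value << kFailureTagSizeSketch) | kFailureTagSketch;
}

unsigned DecodeAllocationSpace(unsigned word) {
  unsigned value = word >> kFailureTagSizeSketch;             // strip "11"
  return (value >> kFailureTypeTagSizeSketch) & kSpaceTagMaskSketch;
}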
@@ -1760,6 +1751,7 @@ class FixedArray: public HeapObject {
   // Setters with less debug checks for the GC to use.
   inline void set_unchecked(int index, Smi* value);
   inline void set_null_unchecked(int index);
+  inline void set_unchecked(int index, Object* value, WriteBarrierMode mode);
 
   // Gives access to raw memory which stores the array's data.
   inline Object** data_start();

4
deps/v8/src/platform-freebsd.cc

@@ -291,6 +291,10 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   int frames_size = frames.length();
   ScopedVector<void*> addresses(frames_size);

33
deps/v8/src/platform-linux.cc

@@ -397,6 +397,30 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+
+
+void OS::SignalCodeMovingGC() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(kGCFakeMmap, "w+");
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
+  ASSERT(addr != MAP_FAILED);
+  munmap(addr, size);
+  fclose(f);
+#endif
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   // backtrace is a glibc extension.
 #ifdef __GLIBC__
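The marker only helps if the log consumer knows the name; a hypothetical consumer-side check (the helper is invented, only the path comes from kGCFakeMmap above):

#include <string>

// Hypothetical: a kernel-log reader treats an executable mmap of the
// marker file as a code-moving-GC event and resynchronizes there.
bool IsCodeMovingGCMarker(const std::string& mmap_filename) {
  return mmap_filename == "/tmp/__v8_gc__";  // must match kGCFakeMmap
}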
@@ -748,6 +772,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
   if (active_sampler_ == NULL) return;
+  if (!IsVmThread()) return;
 
   TickSample sample_obj;
   TickSample* sample = CpuProfiler::TickSampleEvent();
@@ -755,6 +780,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   // We always sample the VM state.
   sample->state = VMState::current_state();
+
   // If profiling, we extract the current pc and sp.
   if (active_sampler_->IsProfiling()) {
     // Extracting the sample from the context is extremely machine dependent.
@@ -783,10 +809,8 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
     // Implement this on MIPS.
     UNIMPLEMENTED();
 #endif
-    if (IsVmThread()) {
-      active_sampler_->SampleStack(sample);
-    }
+    active_sampler_->SampleStack(sample);
   }
   active_sampler_->Tick(sample);
 #endif
@@ -806,7 +830,10 @@ class Sampler::PlatformData : public Malloced {
 
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData();
 }

35
deps/v8/src/platform-macos.cc

@@ -245,6 +245,10 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
   // MacOSX requires all these to install so we can assume they are present.
   // These constants are defined by the CPUid instructions.
@@ -549,17 +553,24 @@ class Sampler::PlatformData : public Malloced {
   // Sampler thread handler.
   void Runner() {
-    // Loop until the sampler is disengaged, keeping the specified samling freq.
+    // Loop until the sampler is disengaged, keeping the specified
+    // sampling frequency.
     for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
+      // If the sampler runs in sync with the JS thread, we try to
+      // suspend it. If we fail, we skip the current sample.
+      if (sampler_->IsSynchronous()) {
+        if (KERN_SUCCESS != thread_suspend(profiled_thread_)) continue;
+      }
+
       // We always sample the VM state.
       sample->state = VMState::current_state();
       // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()
-          && KERN_SUCCESS == thread_suspend(profiled_thread_)) {
+      if (sampler_->IsProfiling()) {
 #if V8_HOST_ARCH_X64
         thread_state_flavor_t flavor = x86_THREAD_STATE64;
         x86_thread_state64_t state;
@@ -591,11 +602,14 @@ class Sampler::PlatformData : public Malloced {
         sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
         sampler_->SampleStack(sample);
         }
-        thread_resume(profiled_thread_);
       }
 
       // Invoke tick handler with program counter and stack pointer.
       sampler_->Tick(sample);
+
+      // If the sampler runs in sync with the JS thread, we have to
+      // remember to resume it.
+      if (sampler_->IsSynchronous()) thread_resume(profiled_thread_);
     }
   }
 };
@@ -613,7 +627,10 @@ static void* SamplerEntry(void* arg) {
 
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData(this);
 }
@@ -624,9 +641,9 @@ Sampler::~Sampler() {
 
 void Sampler::Start() {
-  // If we are profiling, we need to be able to access the calling
-  // thread.
-  if (IsProfiling()) {
+  // If we are starting a synchronous sampler, we need to be able to
+  // access the calling thread.
+  if (IsSynchronous()) {
     data_->profiled_thread_ = mach_thread_self();
   }
@@ -655,7 +672,7 @@ void Sampler::Stop() {
   pthread_join(data_->sampler_thread_, NULL);
 
   // Deallocate Mach port for thread.
-  if (IsProfiling()) {
+  if (IsSynchronous()) {
     mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
   }
 }

5
deps/v8/src/platform-nullos.cc

@@ -240,6 +240,11 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+void OS::SignalCodeMovingGC() {
+  UNIMPLEMENTED();
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   UNIMPLEMENTED();
   return 0;

4
deps/v8/src/platform-openbsd.cc

@@ -289,6 +289,10 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
   UNIMPLEMENTED();
   return 1;

9
deps/v8/src/platform-solaris.cc

@@ -256,6 +256,10 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 struct StackWalker {
   Vector<OS::StackFrame>& frames;
   int index;
@@ -598,7 +602,10 @@ class Sampler::PlatformData : public Malloced {
 
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData();
 }

39
deps/v8/src/platform-win32.cc

@@ -845,14 +845,15 @@ void* OS::Allocate(const size_t requested,
                    bool is_executable) {
   // The address range used to randomize RWX allocations in OS::Allocate
   // Try not to map pages into the default range that windows loads DLLs
+  // Use a multiple of 64k to prevent committing unused memory.
   // Note: This does not guarantee RWX regions will be within the
   // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
 #ifdef V8_HOST_ARCH_64_BIT
   static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x000004FFFFFFFFFF;
+  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
 #else
   static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-  static const intptr_t kAllocationRandomAddressMax = 0x4FFFFFFF;
+  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
 #endif
 
   // VirtualAlloc rounds allocated size to page size automatically.
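The new maxima end in 0000 because Windows reserves virtual memory at 64 KB granularity, so a randomized allocation hint should itself be 64 KB aligned. A sketch of the idea (rand() is a stand-in for whatever randomness the allocator actually uses):

#include <stdint.h>
#include <stdlib.h>

// Sketch: pick a 64 KB-aligned allocation hint inside [min, max].
void* RandomizedHint(intptr_t min, intptr_t max) {
  intptr_t address = min + static_cast<intptr_t>(rand()) % (max - min);
  address &= ~static_cast<intptr_t>(0xFFFF);  // round down to 64 KB boundary
  return reinterpret_cast<void*>(address);
}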
@@ -1217,6 +1218,10 @@ void OS::LogSharedLibraryAddresses() {
 }
 
 
+void OS::SignalCodeMovingGC() {
+}
+
+
 // Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll
 
 // Switch off warning 4748 (/GS can not protect parameters and local variables
@@ -1838,17 +1843,25 @@ class Sampler::PlatformData : public Malloced {
     // Context used for sampling the register state of the profiled thread.
     CONTEXT context;
     memset(&context, 0, sizeof(context));
-    // Loop until the sampler is disengaged, keeping the specified samling freq.
+    // Loop until the sampler is disengaged, keeping the specified
+    // sampling frequency.
     for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
       TickSample sample_obj;
      TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
+      // If the sampler runs in sync with the JS thread, we try to
+      // suspend it. If we fail, we skip the current sample.
+      if (sampler_->IsSynchronous()) {
+        static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+        if (SuspendThread(profiled_thread_) == kSuspendFailed) continue;
+      }
+
       // We always sample the VM state.
       sample->state = VMState::current_state();
       // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()
-          && SuspendThread(profiled_thread_) != (DWORD)-1) {
+      if (sampler_->IsProfiling()) {
         context.ContextFlags = CONTEXT_FULL;
         if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
@@ -1862,11 +1875,14 @@ class Sampler::PlatformData : public Malloced {
 #endif
           sampler_->SampleStack(sample);
         }
-        ResumeThread(profiled_thread_);
       }
 
       // Invoke tick handler with program counter and stack pointer.
       sampler_->Tick(sample);
+
+      // If the sampler runs in sync with the JS thread, we have to
+      // remember to resume it.
+      if (sampler_->IsSynchronous()) ResumeThread(profiled_thread_);
     }
   }
 };
@@ -1883,7 +1899,10 @@ static unsigned int __stdcall SamplerEntry(void* arg) {
 
 // Initialize a profile sampler.
 Sampler::Sampler(int interval, bool profiling)
-    : interval_(interval), profiling_(profiling), active_(false) {
+    : interval_(interval),
+      profiling_(profiling),
+      synchronous_(profiling),
+      active_(false) {
   data_ = new PlatformData(this);
 }
@@ -1895,9 +1914,9 @@ Sampler::~Sampler() {
 
 // Start profiling.
 void Sampler::Start() {
-  // If we are profiling, we need to be able to access the calling
-  // thread.
-  if (IsProfiling()) {
+  // If we are starting a synchronous sampler, we need to be able to
+  // access the calling thread.
+  if (IsSynchronous()) {
     // Get a handle to the calling thread. This is the thread that we are
     // going to profile. We need to make a copy of the handle because we are
     // going to use it in the sampler thread. Using GetThreadHandle() will

24
deps/v8/src/platform.h

@@ -257,11 +257,16 @@ class OS {
   static char* StrChr(char* str, int c);
   static void StrNCpy(Vector<char> dest, const char* src, size_t n);
 
-  // Support for profiler. Can do nothing, in which case ticks
-  // occuring in shared libraries will not be properly accounted
-  // for.
+  // Support for the profiler. Can do nothing, in which case ticks
+  // occuring in shared libraries will not be properly accounted for.
   static void LogSharedLibraryAddresses();
 
+  // Support for the profiler. Notifies the external profiling
+  // process that a code moving garbage collection starts. Can do
+  // nothing, in which case the code objects must not move (e.g., by
+  // using --never-compact) if accurate profiling is desired.
+  static void SignalCodeMovingGC();
+
   // The return value indicates the CPU features we are sure of because of the
   // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2
   // instructions.
@@ -568,17 +573,24 @@ class Sampler {
   void Start();
   void Stop();
 
-  // Is the sampler used for profiling.
-  inline bool IsProfiling() { return profiling_; }
+  // Is the sampler used for profiling?
+  bool IsProfiling() const { return profiling_; }
+
+  // Is the sampler running in sync with the JS thread? On platforms
+  // where the sampler is implemented with a thread that wakes up
+  // every now and then, having a synchronous sampler implies
+  // suspending/resuming the JS thread.
+  bool IsSynchronous() const { return synchronous_; }
 
   // Whether the sampler is running (that is, consumes resources).
-  inline bool IsActive() { return active_; }
+  bool IsActive() const { return active_; }
 
   class PlatformData;
 
  private:
   const int interval_;
   const bool profiling_;
+  const bool synchronous_;
   bool active_;
   PlatformData* data_;  // Platform specific data.
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
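All the platform Runner loops changed above share the same shape; a condensed, self-contained sketch of one iteration (the three helpers are stand-ins for thread_suspend/SuspendThread and friends, not real APIs):

// Stand-ins for the platform calls; only the control flow is the point.
static bool Suspend() { return true; }  // thread_suspend / SuspendThread
static void Resume() {}                 // thread_resume / ResumeThread
static void CaptureAndRecord() {}       // read registers, SampleStack, Tick

void SamplerIteration(bool profiling, bool synchronous) {
  if (synchronous && !Suspend()) return;  // could not stop the JS thread: skip
  if (profiling) CaptureAndRecord();      // registers are only safe while stopped
  if (synchronous) Resume();              // never leave the thread suspended
}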

6
deps/v8/src/regexp-macro-assembler-irregexp.cc

@@ -145,6 +145,12 @@ void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
 }
 
 
+void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
+  ASSERT(is_uint24(by));
+  Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
+}
+
+
 void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
   ASSERT(register_index >= 0);
   ASSERT(register_index <= kMaxRegister);
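The is_uint24 assert suggests each irregexp instruction packs an 8-bit bytecode and a 24-bit operand into one 32-bit word; a sketch of that assumed packing (layout inferred, not quoted from bytecodes-irregexp.h):

#include <stdint.h>

// Assumed layout: low 8 bits are the bytecode, upper 24 bits the operand,
// which is why SetCurrentPositionFromEnd asserts is_uint24(by).
uint32_t PackBytecode(uint8_t bytecode, uint32_t arg24) {
  return static_cast<uint32_t>(bytecode) | (arg24 << 8);
}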

1
deps/v8/src/regexp-macro-assembler-irregexp.h

@@ -65,6 +65,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
   virtual void PushRegister(int register_index,
                             StackCheckFlag check_stack_limit);
   virtual void AdvanceRegister(int reg, int by);  // r[reg] += by.
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
   virtual void ClearRegisters(int reg_from, int reg_to);

6
deps/v8/src/regexp-macro-assembler-tracer.cc

@@ -136,6 +136,12 @@ void RegExpMacroAssemblerTracer::AdvanceRegister(int reg, int by) {
 }
 
 
+void RegExpMacroAssemblerTracer::SetCurrentPositionFromEnd(int by) {
+  PrintF(" SetCurrentPositionFromEnd(by=%d);\n", by);
+  assembler_->SetCurrentPositionFromEnd(by);
+}
+
+
 void RegExpMacroAssemblerTracer::SetRegister(int register_index, int to) {
   PrintF(" SetRegister(register=%d, to=%d);\n", register_index, to);
   assembler_->SetRegister(register_index, to);

1
deps/v8/src/regexp-macro-assembler-tracer.h

@@ -89,6 +89,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
                             StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);

1
deps/v8/src/regexp-macro-assembler.h

@@ -155,6 +155,7 @@ class RegExpMacroAssembler {
                             StackCheckFlag check_stack_limit) = 0;
   virtual void ReadCurrentPositionFromRegister(int reg) = 0;
   virtual void ReadStackPointerFromRegister(int reg) = 0;
+  virtual void SetCurrentPositionFromEnd(int by) = 0;
   virtual void SetRegister(int register_index, int to) = 0;
   virtual void Succeed() = 0;
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0;

4
deps/v8/src/runtime.cc

@@ -6703,7 +6703,7 @@ static Object* Runtime_StackOverflow(Arguments args) {
 
 static Object* Runtime_StackGuard(Arguments args) {
-  ASSERT(args.length() == 1);
+  ASSERT(args.length() == 0);
 
   // First check if this is a real stack overflow.
   if (StackGuard::IsStackOverflow()) {
@@ -10153,7 +10153,7 @@ void Runtime::PerformGC(Object* result) {
   if (failure->IsRetryAfterGC()) {
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
-    Heap::CollectGarbage(failure->requested(), failure->allocation_space());
+    Heap::CollectGarbage(failure->allocation_space());
   } else {
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).

2
deps/v8/src/runtime.h

@@ -267,7 +267,7 @@ namespace internal {
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
-  F(StackGuard, 1, 1) \
+  F(StackGuard, 0, 1) \
   F(PromoteScheduledException, 0, 1) \
   \
   /* Contexts */ \

2
deps/v8/src/serialize.cc

@@ -619,6 +619,8 @@ void Deserializer::Deserialize() {
   external_reference_decoder_ = new ExternalReferenceDecoder();
   Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
   Heap::IterateWeakRoots(this, VISIT_ALL);
+
+  Heap::set_global_contexts_list(Heap::undefined_value());
 }

9
deps/v8/src/spaces-inl.h

@@ -407,8 +407,7 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
-  ASSERT(p->is_valid());
-
+  if (!p->is_valid()) return false;
   return MemoryAllocator::IsPageInSpace(p, this);
 }
@@ -440,7 +439,7 @@ Object* PagedSpace::AllocateRaw(int size_in_bytes) {
   object = SlowAllocateRaw(size_in_bytes);
   if (object != NULL) return object;
 
-  return Failure::RetryAfterGC(size_in_bytes, identity());
+  return Failure::RetryAfterGC(identity());
 }
@@ -454,7 +453,7 @@ Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
   object = SlowMCAllocateRaw(size_in_bytes);
   if (object != NULL) return object;
 
-  return Failure::RetryAfterGC(size_in_bytes, identity());
+  return Failure::RetryAfterGC(identity());
 }
@@ -475,7 +474,7 @@ HeapObject* LargeObjectChunk::GetObject() {
 Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                       AllocationInfo* alloc_info) {
   Address new_top = alloc_info->top + size_in_bytes;
-  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);
+  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
 
   Object* obj = HeapObject::FromAddress(alloc_info->top);
   alloc_info->top = new_top;

8
deps/v8/src/spaces.cc

@@ -1828,7 +1828,7 @@ Object* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
   if (cur == kEnd) {
     // No large enough size in list.
     *wasted_bytes = 0;
-    return Failure::RetryAfterGC(size_in_bytes, owner_);
+    return Failure::RetryAfterGC(owner_);
   }
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   int rem = cur - index;
@@ -1926,7 +1926,7 @@ void FixedSizeFreeList::Free(Address start) {
 Object* FixedSizeFreeList::Allocate() {
   if (head_ == NULL) {
-    return Failure::RetryAfterGC(object_size_, owner_);
+    return Failure::RetryAfterGC(owner_);
   }
 
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
@@ -2753,14 +2753,14 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
-    return Failure::RetryAfterGC(requested_size, identity());
+    return Failure::RetryAfterGC(identity());
   }
 
   size_t chunk_size;
   LargeObjectChunk* chunk =
       LargeObjectChunk::New(requested_size, &chunk_size, executable);
   if (chunk == NULL) {
-    return Failure::RetryAfterGC(requested_size, identity());
+    return Failure::RetryAfterGC(identity());
  }
 
   size_ += static_cast<int>(chunk_size);

1
deps/v8/src/spaces.h

@@ -2194,7 +2194,6 @@ class LargeObjectSpace : public Space {
   // if such a page doesn't exist.
   LargeObjectChunk* FindChunkContainingPc(Address pc);
 
-
   // Iterates objects covered by dirty regions.
   void IterateDirtyRegions(ObjectSlotCallback func);

61
deps/v8/src/strtod.cc

@@ -85,12 +85,22 @@ static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
 extern "C" double gay_strtod(const char* s00, const char** se);
 
 static double old_strtod(Vector<const char> buffer, int exponent) {
+  // gay_strtod is broken on Linux,x86. For numbers with few decimal digits
+  // the computation is done using floating-point operations which (on Linux)
+  // are prone to double-rounding errors.
+  // By adding several zeroes to the buffer gay_strtod falls back to a slower
+  // (but correct) algorithm.
+  const int kInsertedZeroesCount = 20;
   char gay_buffer[1024];
   Vector<char> gay_buffer_vector(gay_buffer, sizeof(gay_buffer));
   int pos = 0;
   for (int i = 0; i < buffer.length(); ++i) {
     gay_buffer_vector[pos++] = buffer[i];
   }
+  for (int i = 0; i < kInsertedZeroesCount; ++i) {
+    gay_buffer_vector[pos++] = '0';
+  }
+  exponent -= kInsertedZeroesCount;
   gay_buffer_vector[pos++] = 'e';
   if (exponent < 0) {
     gay_buffer_vector[pos++] = '-';
@@ -139,13 +149,18 @@ uint64_t ReadUint64(Vector<const char> buffer) {
 }
 
-double Strtod(Vector<const char> buffer, int exponent) {
-  Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
-  Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
-  exponent += left_trimmed.length() - trimmed.length();
-  if (trimmed.length() == 0) return 0.0;
-  if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
-  if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
+static bool DoubleStrtod(Vector<const char> trimmed,
+                         int exponent,
+                         double* result) {
+#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) && !defined(WIN32)
+  // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
+  // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
+  // result is not accurate.
+  // We know that Windows32 uses 64 bits and is therefore accurate.
+  // Note that the ARM simulator is compiled for 32bits. It therefore exhibits
+  // the same problem.
+  return false;
+#endif
   if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
     // The trimmed input fits into a double.
     // If the 10^exponent (resp. 10^-exponent) fits into a double too then we
@@ -155,13 +170,15 @@ double Strtod(Vector<const char> buffer, int exponent) {
     // return the best possible approximation.
     if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
       // 10^-exponent fits into a double.
-      double buffer_d = static_cast<double>(ReadUint64(trimmed));
-      return buffer_d / exact_powers_of_ten[-exponent];
+      *result = static_cast<double>(ReadUint64(trimmed));
+      *result /= exact_powers_of_ten[-exponent];
+      return true;
     }
     if (0 <= exponent && exponent < kExactPowersOfTenSize) {
       // 10^exponent fits into a double.
-      double buffer_d = static_cast<double>(ReadUint64(trimmed));
-      return buffer_d * exact_powers_of_ten[exponent];
+      *result = static_cast<double>(ReadUint64(trimmed));
+      *result *= exact_powers_of_ten[exponent];
+      return true;
     }
     int remaining_digits =
         kMaxExactDoubleIntegerDecimalDigits - trimmed.length();
@@ -170,10 +187,26 @@ double Strtod(Vector<const char> buffer, int exponent) {
       // The trimmed string was short and we can multiply it with
       // 10^remaining_digits. As a result the remaining exponent now fits
       // into a double too.
-      double buffer_d = static_cast<double>(ReadUint64(trimmed));
-      buffer_d *= exact_powers_of_ten[remaining_digits];
-      return buffer_d * exact_powers_of_ten[exponent - remaining_digits];
+      *result = static_cast<double>(ReadUint64(trimmed));
+      *result *= exact_powers_of_ten[remaining_digits];
+      *result *= exact_powers_of_ten[exponent - remaining_digits];
+      return true;
+    }
+  }
+  return false;
+}
+
+
+double Strtod(Vector<const char> buffer, int exponent) {
+  Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
+  Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
+  exponent += left_trimmed.length() - trimmed.length();
+  if (trimmed.length() == 0) return 0.0;
+  if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
+  if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
+
+  double result;
+  if (DoubleStrtod(trimmed, exponent, &result)) {
+    return result;
   }
   return old_strtod(trimmed, exponent);
 }
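For example (digits and exponent invented): buffer "5" with exponent -2 is rewritten as "500000000000000000000e-22" (twenty zeros appended, exponent lowered by twenty). That string denotes the same value, 0.05, but it is long enough that gay_strtod abandons its fast floating-point path for the slower, correctly rounded one.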

3
deps/v8/src/top.cc

@@ -68,6 +68,9 @@ void ThreadLocalTop::Initialize() {
   handler_ = 0;
 #ifdef ENABLE_LOGGING_AND_PROFILING
   js_entry_sp_ = 0;
+#endif
+#ifdef ENABLE_VMSTATE_TRACKING
+  current_vm_state_ = NULL;
 #endif
   try_catch_handler_address_ = NULL;
   context_ = NULL;

16
deps/v8/src/top.h

@@ -41,6 +41,7 @@ namespace internal {
 
 class SaveContext;  // Forward declaration.
 class ThreadVisitor;  // Defined in v8threads.h
+class VMState;  // Defined in vm-state.h
 
 class ThreadLocalTop BASE_EMBEDDED {
  public:
@@ -101,10 +102,15 @@ class ThreadLocalTop BASE_EMBEDDED {
   // Stack.
   Address c_entry_fp_;  // the frame pointer of the top c entry frame
   Address handler_;   // try-blocks are chained through the stack
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
 #endif
+
+#ifdef ENABLE_VMSTATE_TRACKING
+  VMState* current_vm_state_;
+#endif
+
   // Generated code scratch locations.
   int32_t formal_count_;
 
@@ -254,6 +260,16 @@ class Top {
   }
 #endif
 
+#ifdef ENABLE_VMSTATE_TRACKING
+  static VMState* current_vm_state() {
+    return thread_local_.current_vm_state_;
+  }
+
+  static void set_current_vm_state(VMState* state) {
+    thread_local_.current_vm_state_ = state;
+  }
+#endif
+
   // Generated code scratch locations.
   static void* formal_count_address() { return &thread_local_.formal_count_; }

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     5
-#define BUILD_NUMBER      0
+#define BUILD_NUMBER      1
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false

6
deps/v8/src/vm-state-inl.h

@@ -75,9 +75,9 @@ VMState::VMState(StateTag state)
 #endif
   state_ = state;
   // Save the previous state.
-  previous_ = reinterpret_cast<VMState*>(current_state_);
+  previous_ = Top::current_vm_state();
   // Install the new state.
-  OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(this));
+  Top::set_current_vm_state(this);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
@@ -106,7 +106,7 @@ VMState::VMState(StateTag state)
 VMState::~VMState() {
   if (disabled_) return;
   // Return to the previous state.
-  OS::ReleaseStore(&current_state_, reinterpret_cast<AtomicWord>(previous_));
+  Top::set_current_vm_state(previous_);
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {

39
deps/v8/src/vm-state.cc

@@ -1,39 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "vm-state.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef ENABLE_VMSTATE_TRACKING
-AtomicWord VMState::current_state_ = 0;
-#endif
-
-} }  // namespace v8::internal

10
deps/v8/src/vm-state.h

@@ -28,6 +28,8 @@
 #ifndef V8_VM_STATE_H_
 #define V8_VM_STATE_H_
 
+#include "top.h"
+
 namespace v8 {
 namespace internal {
 
@@ -44,16 +46,16 @@ class VMState BASE_EMBEDDED {
 
   // Used for debug asserts.
   static bool is_outermost_external() {
-    return current_state_ == 0;
+    return Top::current_vm_state() == 0;
   }
 
   static StateTag current_state() {
-    VMState* state = reinterpret_cast<VMState*>(current_state_);
+    VMState* state = Top::current_vm_state();
     return state ? state->state() : EXTERNAL;
   }
 
   static Address external_callback() {
-    VMState* state = reinterpret_cast<VMState*>(current_state_);
+    VMState* state = Top::current_vm_state();
     return state ? state->external_callback_ : NULL;
   }
 
@@ -63,8 +65,6 @@ class VMState BASE_EMBEDDED {
   VMState* previous_;
   Address external_callback_;
 
-  // A stack of VM states.
-  static AtomicWord current_state_;
 #else
  public:
   explicit VMState(StateTag state) {}
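VMState stays a per-thread RAII stack; only the storage moved from a global atomic into Top. A usage sketch (EXTERNAL appears in the code above; GC is an assumed StateTag value):

// States nest and unwind with scope via the previous_ links.
void Example() {
  VMState outer(EXTERNAL);   // VMState::current_state() == EXTERNAL
  {
    VMState inner(GC);       // current_state() == GC inside this scope
  }                          // ~VMState() restores EXTERNAL
}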

13
deps/v8/src/x64/code-stubs-x64.cc

@@ -2123,7 +2123,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
     __ JumpIfNotBothSmi(rax, rdx, &non_smi);
     __ subq(rdx, rax);
     __ j(no_overflow, &smi_done);
-    __ neg(rdx);  // Correct sign in case of overflow.
+    __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
     __ bind(&smi_done);
     __ movq(rax, rdx);
     __ ret(0);
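Why NOT can replace NEG here: flipping every bit also flips the sign bit, which is all the comparison result needs, and unlike NEG it behaves sensibly on the most negative value. A standalone statement of the identity:

#include <stdint.h>

// Two's complement: ~x == -x - 1, so ~x always has the opposite sign bit
// of x, while negating INT64_MIN overflows back to INT64_MIN. The stub
// only needs the sign of the overflowed difference, which is never 0.
int64_t SignCorrected(int64_t overflowed_diff) {
  return ~overflowed_diff;  // correct sign bit; magnitude is meaningless
}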
@@ -2394,16 +2394,7 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
 
 void StackCheckStub::Generate(MacroAssembler* masm) {
-  // Because builtins always remove the receiver from the stack, we
-  // have to fake one to avoid underflowing the stack. The receiver
-  // must be inserted below the return address on the stack so we
-  // temporarily store that in a register.
-  __ pop(rax);
-  __ Push(Smi::FromInt(0));
-  __ push(rax);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }

8
deps/v8/src/x64/macro-assembler-x64.h

@@ -101,9 +101,9 @@ class MacroAssembler: public Assembler {
   // dirty. |object| is the object being stored into, |value| is the
   // object being stored. If |offset| is zero, then the |scratch|
   // register contains the array index into the elements array
-  // represented as a Smi. All registers are clobbered by the
-  // operation. RecordWrite filters out smis so it does not update the
-  // write barrier if the value is a smi.
+  // represented as an untagged 32-bit integer. All registers are
+  // clobbered by the operation. RecordWrite filters out smis so it
+  // does not update the write barrier if the value is a smi.
   void RecordWrite(Register object,
                    int offset,
                    Register value,
@@ -122,7 +122,7 @@ class MacroAssembler: public Assembler {
   // The value is known to not be a smi.
   // object is the object being stored into, value is the object being stored.
   // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as a Smi.
+  // the elements array represented as an untagged 32-bit integer.
   // All registers are clobbered by the operation.
   void RecordWriteNonSmi(Register object,
                          int offset,

14
deps/v8/src/x64/regexp-macro-assembler-x64.cc

@@ -145,7 +145,6 @@ int RegExpMacroAssemblerX64::stack_limit_slack() {
 
 void RegExpMacroAssemblerX64::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    Label inside_string;
     __ addq(rdi, Immediate(by * char_size()));
   }
 }
@@ -1053,6 +1052,19 @@ void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
 }
 
 
+void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
+  NearLabel after_position;
+  __ cmpq(rdi, Immediate(-by * char_size()));
+  __ j(greater_equal, &after_position);
+  __ movq(rdi, Immediate(-by * char_size()));
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
+
 void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
   ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
   __ movq(register_location(register_index), Immediate(to));

1
deps/v8/src/x64/regexp-macro-assembler-x64.h

@@ -93,6 +93,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
                             StackCheckFlag check_stack_limit);
   virtual void ReadCurrentPositionFromRegister(int reg);
   virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
   virtual void SetRegister(int register_index, int to);
   virtual void Succeed();
   virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);

2
deps/v8/test/cctest/test-alloc.cc

@@ -37,7 +37,7 @@ using namespace v8::internal;
 
 static Object* AllocateAfterFailures() {
   static int attempts = 0;
-  if (++attempts < 3) return Failure::RetryAfterGC(0);
+  if (++attempts < 3) return Failure::RetryAfterGC();
 
   // New space.
   NewSpace* new_space = Heap::new_space();
40
deps/v8/test/cctest/test-api.cc

@ -431,8 +431,8 @@ THREADED_TEST(ScriptMakingExternalString) {
LocalContext env; LocalContext env;
Local<String> source = String::New(two_byte_source); Local<String> source = String::New(two_byte_source);
// Trigger GCs so that the newly allocated string moves to old gen. // Trigger GCs so that the newly allocated string moves to old gen.
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal(new TestResource(two_byte_source)); bool success = source->MakeExternal(new TestResource(two_byte_source));
CHECK(success); CHECK(success);
Local<Script> script = Script::Compile(source); Local<Script> script = Script::Compile(source);
@ -456,8 +456,8 @@ THREADED_TEST(ScriptMakingExternalAsciiString) {
LocalContext env; LocalContext env;
Local<String> source = v8_str(c_source); Local<String> source = v8_str(c_source);
// Trigger GCs so that the newly allocated string moves to old gen. // Trigger GCs so that the newly allocated string moves to old gen.
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
bool success = source->MakeExternal( bool success = source->MakeExternal(
new TestAsciiResource(i::StrDup(c_source))); new TestAsciiResource(i::StrDup(c_source)));
CHECK(success); CHECK(success);
@ -479,8 +479,8 @@ TEST(MakingExternalStringConditions) {
LocalContext env; LocalContext env;
// Free some space in the new space so that we can check freshness. // Free some space in the new space so that we can check freshness.
i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(i::NEW_SPACE);
i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(i::NEW_SPACE);
uint16_t* two_byte_string = AsciiToTwoByteString("small"); uint16_t* two_byte_string = AsciiToTwoByteString("small");
Local<String> small_string = String::New(two_byte_string); Local<String> small_string = String::New(two_byte_string);
@ -489,8 +489,8 @@ TEST(MakingExternalStringConditions) {
// We should refuse to externalize newly created small string. // We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal()); CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen. // Trigger GCs so that the newly allocated string moves to old gen.
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted. // Old space strings should be accepted.
CHECK(small_string->CanMakeExternal()); CHECK(small_string->CanMakeExternal());
@ -525,15 +525,15 @@ TEST(MakingExternalAsciiStringConditions) {
LocalContext env; LocalContext env;
// Free some space in the new space so that we can check freshness. // Free some space in the new space so that we can check freshness.
i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(i::NEW_SPACE);
i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(i::NEW_SPACE);
Local<String> small_string = String::New("small"); Local<String> small_string = String::New("small");
// We should refuse to externalize newly created small string. // We should refuse to externalize newly created small string.
CHECK(!small_string->CanMakeExternal()); CHECK(!small_string->CanMakeExternal());
// Trigger GCs so that the newly allocated string moves to old gen. // Trigger GCs so that the newly allocated string moves to old gen.
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
// Old space strings should be accepted. // Old space strings should be accepted.
CHECK(small_string->CanMakeExternal()); CHECK(small_string->CanMakeExternal());
@ -565,8 +565,8 @@ THREADED_TEST(UsingExternalString) {
String::NewExternal(new TestResource(two_byte_string)); String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen. // Trigger GCs so that the newly allocated string moves to old gen.
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring); i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
CHECK(isymbol->IsSymbol()); CHECK(isymbol->IsSymbol());
} }
@ -583,8 +583,8 @@ THREADED_TEST(UsingExternalAsciiString) {
new TestAsciiResource(i::StrDup(one_byte_string))); new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
// Trigger GCs so that the newly allocated string moves to old gen. // Trigger GCs so that the newly allocated string moves to old gen.
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(i::NEW_SPACE); // in survivor space now
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Heap::CollectGarbage(i::NEW_SPACE); // in old gen now
i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring); i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
CHECK(isymbol->IsSymbol()); CHECK(isymbol->IsSymbol());
} }
@ -602,12 +602,12 @@ THREADED_TEST(ScavengeExternalString) {
Local<String> string = Local<String> string =
String::NewExternal(new TestResource(two_byte_string)); String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(i::NEW_SPACE);
in_new_space = i::Heap::InNewSpace(*istring); in_new_space = i::Heap::InNewSpace(*istring);
CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring)); CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
CHECK_EQ(0, TestResource::dispose_count); CHECK_EQ(0, TestResource::dispose_count);
} }
i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE); i::Heap::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, TestResource::dispose_count); CHECK_EQ(1, TestResource::dispose_count);
} }
@ -621,12 +621,12 @@ THREADED_TEST(ScavengeExternalAsciiString) {
Local<String> string = String::NewExternal( Local<String> string = String::NewExternal(
new TestAsciiResource(i::StrDup(one_byte_string))); new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE); i::Heap::CollectGarbage(i::NEW_SPACE);
in_new_space = i::Heap::InNewSpace(*istring); in_new_space = i::Heap::InNewSpace(*istring);
CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring)); CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
CHECK_EQ(0, TestAsciiResource::dispose_count); CHECK_EQ(0, TestAsciiResource::dispose_count);
} }
i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE); i::Heap::CollectGarbage(in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, TestAsciiResource::dispose_count); CHECK_EQ(1, TestAsciiResource::dispose_count);
} }

6
deps/v8/test/cctest/test-debug.cc

@ -867,7 +867,7 @@ static void DebugEventBreakPointCollectGarbage(
break_point_hit_count++; break_point_hit_count++;
if (break_point_hit_count % 2 == 0) { if (break_point_hit_count % 2 == 0) {
// Scavenge. // Scavenge.
Heap::CollectGarbage(0, v8::internal::NEW_SPACE); Heap::CollectGarbage(v8::internal::NEW_SPACE);
} else { } else {
// Mark sweep compact. // Mark sweep compact.
Heap::CollectAllGarbage(true); Heap::CollectAllGarbage(true);
@ -891,7 +891,7 @@ static void DebugEventBreak(v8::DebugEvent event,
// Run the garbage collector to enforce heap verification if option // Run the garbage collector to enforce heap verification if option
// --verify-heap is set. // --verify-heap is set.
Heap::CollectGarbage(0, v8::internal::NEW_SPACE); Heap::CollectGarbage(v8::internal::NEW_SPACE);
// Set the break flag again to come back here as soon as possible. // Set the break flag again to come back here as soon as possible.
v8::Debug::DebugBreak(); v8::Debug::DebugBreak();
@ -1322,7 +1322,7 @@ static void CallAndGC(v8::Local<v8::Object> recv,
CHECK_EQ(1 + i * 3, break_point_hit_count); CHECK_EQ(1 + i * 3, break_point_hit_count);
// Scavenge and call function. // Scavenge and call function.
Heap::CollectGarbage(0, v8::internal::NEW_SPACE); Heap::CollectGarbage(v8::internal::NEW_SPACE);
f->Call(recv, 0, NULL); f->Call(recv, 0, NULL);
CHECK_EQ(2 + i * 3, break_point_hit_count); CHECK_EQ(2 + i * 3, break_point_hit_count);

2
deps/v8/test/cctest/test-decls.cc

@ -130,7 +130,7 @@ void DeclarationContext::Check(const char* source,
InitializeIfNeeded(); InitializeIfNeeded();
// A retry after a GC may pollute the counts, so perform gc now // A retry after a GC may pollute the counts, so perform gc now
// to avoid that. // to avoid that.
v8::internal::Heap::CollectGarbage(0, v8::internal::NEW_SPACE); v8::internal::Heap::CollectGarbage(v8::internal::NEW_SPACE);
HandleScope scope; HandleScope scope;
TryCatch catcher; TryCatch catcher;
catcher.SetVerbose(true); catcher.SetVerbose(true);

117
deps/v8/test/cctest/test-heap.cc

@ -177,13 +177,11 @@ TEST(Tagging) {
int request = 24; int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request))); CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi()); CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure()); CHECK(Failure::RetryAfterGC(NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
CHECK_EQ(NEW_SPACE, CHECK_EQ(NEW_SPACE,
Failure::RetryAfterGC(request, NEW_SPACE)->allocation_space()); Failure::RetryAfterGC(NEW_SPACE)->allocation_space());
CHECK_EQ(OLD_POINTER_SPACE, CHECK_EQ(OLD_POINTER_SPACE,
Failure::RetryAfterGC(request, Failure::RetryAfterGC(OLD_POINTER_SPACE)->allocation_space());
OLD_POINTER_SPACE)->allocation_space());
CHECK(Failure::Exception()->IsFailure()); CHECK(Failure::Exception()->IsFailure());
CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi()); CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi()); CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
@ -195,8 +193,7 @@ TEST(GarbageCollection) {
v8::HandleScope sc; v8::HandleScope sc;
// Check GC. // Check GC.
int free_bytes = Heap::MaxObjectSizeInPagedSpace(); Heap::CollectGarbage(NEW_SPACE);
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
Handle<String> name = Factory::LookupAsciiSymbol("theFunction"); Handle<String> name = Factory::LookupAsciiSymbol("theFunction");
Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot"); Handle<String> prop_name = Factory::LookupAsciiSymbol("theSlot");
@ -221,7 +218,7 @@ TEST(GarbageCollection) {
CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex)); CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
} }
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE)); Heap::CollectGarbage(NEW_SPACE);
// Function should be alive. // Function should be alive.
CHECK(Top::context()->global()->HasLocalProperty(*name)); CHECK(Top::context()->global()->HasLocalProperty(*name));
@ -239,7 +236,7 @@ TEST(GarbageCollection) {
} }
// After gc, it should survive. // After gc, it should survive.
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE)); Heap::CollectGarbage(NEW_SPACE);
CHECK(Top::context()->global()->HasLocalProperty(*obj_name)); CHECK(Top::context()->global()->HasLocalProperty(*obj_name));
CHECK(Top::context()->global()->GetProperty(*obj_name)->IsJSObject()); CHECK(Top::context()->global()->GetProperty(*obj_name)->IsJSObject());
@ -301,7 +298,7 @@ TEST(GlobalHandles) {
} }
// after gc, it should survive // after gc, it should survive
CHECK(Heap::CollectGarbage(0, NEW_SPACE)); Heap::CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString()); CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber()); CHECK((*h2)->IsHeapNumber());
@ -382,8 +379,8 @@ TEST(WeakGlobalHandlesMark) {
h2 = GlobalHandles::Create(*u); h2 = GlobalHandles::Create(*u);
} }
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
CHECK(Heap::CollectGarbage(0, NEW_SPACE)); Heap::CollectGarbage(NEW_SPACE);
// Make sure the object is promoted. // Make sure the object is promoted.
GlobalHandles::MakeWeak(h2.location(), GlobalHandles::MakeWeak(h2.location(),
@ -392,7 +389,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!GlobalHandles::IsNearDeath(h1.location())); CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location())); CHECK(!GlobalHandles::IsNearDeath(h2.location()));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
CHECK((*h1)->IsString()); CHECK((*h1)->IsString());
@ -426,7 +423,7 @@ TEST(DeleteWeakGlobalHandle) {
CHECK(!WeakPointerCleared); CHECK(!WeakPointerCleared);
// Mark-compact treats weak reference properly. // Mark-compact treats weak reference properly.
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
CHECK(WeakPointerCleared); CHECK(WeakPointerCleared);
} }
@ -814,8 +811,7 @@ TEST(Iteration) {
TEST(LargeObjectSpaceContains) { TEST(LargeObjectSpaceContains) {
InitializeVM(); InitializeVM();
int free_bytes = Heap::MaxObjectSizeInPagedSpace(); Heap::CollectGarbage(NEW_SPACE);
CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE));
Address current_top = Heap::new_space()->top(); Address current_top = Heap::new_space()->top();
Page* page = Page::FromAddress(current_top); Page* page = Page::FromAddress(current_top);
@ -958,6 +954,7 @@ TEST(Regression39128) {
CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize))); CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
} }
TEST(TestCodeFlushing) { TEST(TestCodeFlushing) {
i::FLAG_allow_natives_syntax = true; i::FLAG_allow_natives_syntax = true;
// If we do not flush code this test is invalid. // If we do not flush code this test is invalid.
@ -1001,3 +998,91 @@ TEST(TestCodeFlushing) {
CHECK(function->shared()->is_compiled()); CHECK(function->shared()->is_compiled());
CHECK(function->is_compiled()); CHECK(function->is_compiled());
} }
// Count the number of global contexts in the weak list of global contexts.
static int CountGlobalContexts() {
int count = 0;
Object* object = Heap::global_contexts_list();
while (!object->IsUndefined()) {
count++;
object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
}
return count;
}
TEST(TestInternalWeakLists) {
static const int kNumTestContexts = 10;
v8::HandleScope scope;
v8::Persistent<v8::Context> ctx[kNumTestContexts];
CHECK_EQ(0, CountGlobalContexts());
// Create a number of global contexts which get linked together.
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i] = v8::Context::New();
CHECK_EQ(i + 1, CountGlobalContexts());
ctx[i]->Enter();
ctx[i]->Exit();
}
// Force compilation cache cleanup.
Heap::CollectAllGarbage(true);
// Dispose the global contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i].Dispose();
ctx[i].Clear();
// Scavenge treats these references as strong.
for (int j = 0; j < 10; j++) {
Heap::PerformScavenge();
CHECK_EQ(kNumTestContexts - i, CountGlobalContexts());
}
// Mark compact handles the weak references.
Heap::CollectAllGarbage(true);
CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
}
CHECK_EQ(0, CountGlobalContexts());
}
// Count the number of global contexts in the weak list of global contexts,
// triggering a GC after visiting the specified number of elements.
static int CountGlobalContextsWithGC(int n) {
int count = 0;
Handle<Object> object(Heap::global_contexts_list());
while (!object->IsUndefined()) {
count++;
if (count == n) Heap::CollectAllGarbage(true);
object =
Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
}
return count;
}
TEST(TestInternalWeakListsTraverseWithGC) {
static const int kNumTestContexts = 10;
v8::HandleScope scope;
v8::Persistent<v8::Context> ctx[kNumTestContexts];
CHECK_EQ(0, CountGlobalContexts());
// Create a number of contexts and check the length of the weak list both
// with and without GCs while iterating the list.
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i] = v8::Context::New();
CHECK_EQ(i + 1, CountGlobalContexts());
CHECK_EQ(i + 1, CountGlobalContextsWithGC(i / 2 + 1));
ctx[i]->Enter();
ctx[i]->Exit();
}
}

2
deps/v8/test/cctest/test-log.cc

@ -469,7 +469,7 @@ TEST(ProfMultipleThreads) {
CHECK(!sampler.WasSampleStackCalled()); CHECK(!sampler.WasSampleStackCalled());
nonJsThread.WaitForRunning(); nonJsThread.WaitForRunning();
nonJsThread.SendSigProf(); nonJsThread.SendSigProf();
CHECK(sampler.WaitForTick()); CHECK(!sampler.WaitForTick());
CHECK(!sampler.WasSampleStackCalled()); CHECK(!sampler.WasSampleStackCalled());
sampler.Stop(); sampler.Stop();

22
deps/v8/test/cctest/test-mark-compact.cc

@ -94,7 +94,7 @@ TEST(Promotion) {
CHECK(Heap::InSpace(*array, NEW_SPACE)); CHECK(Heap::InSpace(*array, NEW_SPACE));
// Call the m-c collector, so array becomes an old object. // Call the m-c collector, so array becomes an old object.
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
// Array now sits in the old space // Array now sits in the old space
CHECK(Heap::InSpace(*array, OLD_POINTER_SPACE)); CHECK(Heap::InSpace(*array, OLD_POINTER_SPACE));
@ -111,7 +111,7 @@ TEST(NoPromotion) {
v8::HandleScope sc; v8::HandleScope sc;
// Do a mark compact GC to shrink the heap. // Do a mark compact GC to shrink the heap.
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
// Allocate a big Fixed array in the new space. // Allocate a big Fixed array in the new space.
int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) / int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
@ -134,7 +134,7 @@ TEST(NoPromotion) {
} }
// Call mark compact GC, and it should pass. // Call mark compact GC, and it should pass.
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
// array should not be promoted because the old space is full. // array should not be promoted because the old space is full.
CHECK(Heap::InSpace(*array, NEW_SPACE)); CHECK(Heap::InSpace(*array, NEW_SPACE));
@ -146,7 +146,7 @@ TEST(MarkCompactCollector) {
v8::HandleScope sc; v8::HandleScope sc;
// call mark-compact when heap is empty // call mark-compact when heap is empty
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
// keep allocating garbage in new space until it fails // keep allocating garbage in new space until it fails
const int ARRAY_SIZE = 100; const int ARRAY_SIZE = 100;
@ -154,7 +154,7 @@ TEST(MarkCompactCollector) {
do { do {
array = Heap::AllocateFixedArray(ARRAY_SIZE); array = Heap::AllocateFixedArray(ARRAY_SIZE);
} while (!array->IsFailure()); } while (!array->IsFailure());
CHECK(Heap::CollectGarbage(0, NEW_SPACE)); Heap::CollectGarbage(NEW_SPACE);
array = Heap::AllocateFixedArray(ARRAY_SIZE); array = Heap::AllocateFixedArray(ARRAY_SIZE);
CHECK(!array->IsFailure()); CHECK(!array->IsFailure());
@ -164,7 +164,7 @@ TEST(MarkCompactCollector) {
do { do {
mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
} while (!mapp->IsFailure()); } while (!mapp->IsFailure());
CHECK(Heap::CollectGarbage(0, MAP_SPACE)); Heap::CollectGarbage(MAP_SPACE);
mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); mapp = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
CHECK(!mapp->IsFailure()); CHECK(!mapp->IsFailure());
@ -182,7 +182,7 @@ TEST(MarkCompactCollector) {
Top::context()->global()->SetProperty(func_name, function, NONE); Top::context()->global()->SetProperty(func_name, function, NONE);
JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function)); JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
func_name = String::cast(Heap::LookupAsciiSymbol("theFunction")); func_name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
CHECK(Top::context()->global()->HasLocalProperty(func_name)); CHECK(Top::context()->global()->HasLocalProperty(func_name));
@ -196,7 +196,7 @@ TEST(MarkCompactCollector) {
String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot")); String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
obj->SetProperty(prop_name, Smi::FromInt(23), NONE); obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
obj_name = String::cast(Heap::LookupAsciiSymbol("theObject")); obj_name = String::cast(Heap::LookupAsciiSymbol("theObject"));
CHECK(Top::context()->global()->HasLocalProperty(obj_name)); CHECK(Top::context()->global()->HasLocalProperty(obj_name));
@ -264,7 +264,7 @@ TEST(GCCallback) {
CHECK_EQ(0, gc_starts); CHECK_EQ(0, gc_starts);
CHECK_EQ(gc_ends, gc_starts); CHECK_EQ(gc_ends, gc_starts);
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
CHECK_EQ(1, gc_starts); CHECK_EQ(1, gc_starts);
CHECK_EQ(gc_ends, gc_starts); CHECK_EQ(gc_ends, gc_starts);
} }
@ -317,7 +317,7 @@ TEST(ObjectGroups) {
GlobalHandles::AddGroup(g2_objects, 2); GlobalHandles::AddGroup(g2_objects, 2);
} }
// Do a full GC // Do a full GC
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
// All object should be alive. // All object should be alive.
CHECK_EQ(0, NumberOfWeakCalls); CHECK_EQ(0, NumberOfWeakCalls);
@ -335,7 +335,7 @@ TEST(ObjectGroups) {
GlobalHandles::AddGroup(g2_objects, 2); GlobalHandles::AddGroup(g2_objects, 2);
} }
CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); Heap::CollectGarbage(OLD_POINTER_SPACE);
// All objects should be gone. 5 global handles in total. // All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls); CHECK_EQ(5, NumberOfWeakCalls);

6
deps/v8/test/cctest/test-strtod.cc

@ -198,4 +198,10 @@ TEST(Strtod) {
CHECK_EQ(1234e304, StrtodChar("0000000123400000", 299)); CHECK_EQ(1234e304, StrtodChar("0000000123400000", 299));
CHECK_EQ(V8_INFINITY, StrtodChar("00000000180000000", 300)); CHECK_EQ(V8_INFINITY, StrtodChar("00000000180000000", 300));
CHECK_EQ(17e307, StrtodChar("00000000170000000", 300)); CHECK_EQ(17e307, StrtodChar("00000000170000000", 300));
// The following number is the result of 89255.0/1e22. Both floating-point
// numbers can be accurately represented with doubles. However on Linux/x86
// the floating-point stack is set to 80 bits, and rounding first to extended
// precision and then to double (double rounding) introduces an error.
CHECK_EQ(89255e-22, StrtodChar("89255", -22));
} }

227
deps/v8/test/mjsunit/int32-ops.js

@ -0,0 +1,227 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Repeat most of the tests in smi-ops.js that use SMI_MIN and SMI_MAX, but
// with the SMI_MIN and SMI_MAX of the 64-bit platform, which represents all
// signed 32-bit integer values as smis (see the sanity checks after the
// constants below).
const SMI_MAX = (1 << 30) - 1 + (1 << 30); // Create without overflowing.
const SMI_MIN = -SMI_MAX - 1; // Create without overflowing.
const ONE = 1;
const ONE_HUNDRED = 100;
const OBJ_42 = new (function() {
this.valueOf = function() { return 42; };
})();
assertEquals(42, OBJ_42.valueOf());
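// Sanity checks (illustrative, not in the original test): the constants above
// are built with double additions, so they reach the full signed 32-bit range,
// whereas a single 32-bit shift would wrap into the sign bit.
assertEquals(2147483647, SMI_MAX);
assertEquals(-2147483648, SMI_MIN);
assertEquals(-2147483648, 1 << 31); // << truncates to 32 bits and overflows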
function Add1(x) {
return x + 1;
}
function Add100(x) {
return x + 100;
}
function Add1Reversed(x) {
return 1 + x;
}
function Add100Reversed(x) {
return 100 + x;
}
assertEquals(1, Add1(0)); // fast case
assertEquals(1, Add1Reversed(0)); // fast case
assertEquals(SMI_MAX + ONE, Add1(SMI_MAX), "smimax + 1");
assertEquals(SMI_MAX + ONE, Add1Reversed(SMI_MAX), "1 + smimax");
assertEquals(42 + ONE, Add1(OBJ_42)); // non-smi
assertEquals(42 + ONE, Add1Reversed(OBJ_42)); // non-smi
assertEquals(100, Add100(0)); // fast case
assertEquals(100, Add100Reversed(0)); // fast case
assertEquals(SMI_MAX + ONE_HUNDRED, Add100(SMI_MAX), "smimax + 100");
assertEquals(SMI_MAX + ONE_HUNDRED, Add100Reversed(SMI_MAX), " 100 + smimax");
assertEquals(42 + ONE_HUNDRED, Add100(OBJ_42)); // non-smi
assertEquals(42 + ONE_HUNDRED, Add100Reversed(OBJ_42)); // non-smi
function Sub1(x) {
return x - 1;
}
function Sub100(x) {
return x - 100;
}
function Sub1Reversed(x) {
return 1 - x;
}
function Sub100Reversed(x) {
return 100 - x;
}
assertEquals(0, Sub1(1)); // fast case
assertEquals(-1, Sub1Reversed(2)); // fast case
assertEquals(SMI_MIN - ONE, Sub1(SMI_MIN)); // overflow
assertEquals(ONE - SMI_MIN, Sub1Reversed(SMI_MIN)); // overflow
assertEquals(42 - ONE, Sub1(OBJ_42)); // non-smi
assertEquals(ONE - 42, Sub1Reversed(OBJ_42)); // non-smi
assertEquals(0, Sub100(100)); // fast case
assertEquals(1, Sub100Reversed(99)); // fast case
assertEquals(SMI_MIN - ONE_HUNDRED, Sub100(SMI_MIN)); // overflow
assertEquals(ONE_HUNDRED - SMI_MIN, Sub100Reversed(SMI_MIN)); // overflow
assertEquals(42 - ONE_HUNDRED, Sub100(OBJ_42)); // non-smi
assertEquals(ONE_HUNDRED - 42, Sub100Reversed(OBJ_42)); // non-smi
function Shr1(x) {
return x >>> 1;
}
function Shr100(x) {
return x >>> 100;
}
function Shr1Reversed(x) {
return 1 >>> x;
}
function Shr100Reversed(x) {
return 100 >>> x;
}
function Sar1(x) {
return x >> 1;
}
function Sar100(x) {
return x >> 100;
}
function Sar1Reversed(x) {
return 1 >> x;
}
function Sar100Reversed(x) {
return 100 >> x;
}
assertEquals(0, Shr1(1));
assertEquals(0, Sar1(1));
assertEquals(0, Shr1Reversed(2));
assertEquals(0, Sar1Reversed(2));
assertEquals(1073741824, Shr1(SMI_MIN));
assertEquals(-1073741824, Sar1(SMI_MIN));
assertEquals(1, Shr1Reversed(SMI_MIN));
assertEquals(1, Sar1Reversed(SMI_MIN));
assertEquals(21, Shr1(OBJ_42));
assertEquals(21, Sar1(OBJ_42));
assertEquals(0, Shr1Reversed(OBJ_42));
assertEquals(0, Sar1Reversed(OBJ_42));
assertEquals(6, Shr100(100), "100 >>> 100");
assertEquals(6, Sar100(100), "100 >> 100");
assertEquals(12, Shr100Reversed(99));
assertEquals(12, Sar100Reversed(99));
assertEquals(134217728, Shr100(SMI_MIN));
assertEquals(-134217728, Sar100(SMI_MIN));
assertEquals(100, Shr100Reversed(SMI_MIN));
assertEquals(100, Sar100Reversed(SMI_MIN));
assertEquals(2, Shr100(OBJ_42));
assertEquals(2, Sar100(OBJ_42));
assertEquals(0, Shr100Reversed(OBJ_42));
assertEquals(0, Sar100Reversed(OBJ_42));
function Xor1(x) {
return x ^ 1;
}
function Xor100(x) {
return x ^ 100;
}
function Xor1Reversed(x) {
return 1 ^ x;
}
function Xor100Reversed(x) {
return 100 ^ x;
}
assertEquals(0, Xor1(1));
assertEquals(3, Xor1Reversed(2));
assertEquals(SMI_MIN + 1, Xor1(SMI_MIN));
assertEquals(SMI_MIN + 1, Xor1Reversed(SMI_MIN));
assertEquals(43, Xor1(OBJ_42));
assertEquals(43, Xor1Reversed(OBJ_42));
assertEquals(0, Xor100(100));
assertEquals(7, Xor100Reversed(99));
assertEquals(-2147483548, Xor100(SMI_MIN));
assertEquals(-2147483548, Xor100Reversed(SMI_MIN));
assertEquals(78, Xor100(OBJ_42));
assertEquals(78, Xor100Reversed(OBJ_42));
var x = 0x23; var y = 0x35;
assertEquals(0x16, x ^ y);
// Bitwise not.
var v = 0;
assertEquals(-1, ~v);
v = SMI_MIN;
assertEquals(0x7fffffff, ~v, "~smimin");
v = SMI_MAX;
assertEquals(-0x80000000, ~v, "~smimax");
// Overflowing ++ and --.
v = SMI_MAX;
v++;
assertEquals(0x80000000, v, "smimax++");
v = SMI_MIN;
v--;
assertEquals(-0x80000001, v, "smimin--");
// Check that comparisons of numbers separated by MIN_SMI work.
assertFalse(SMI_MIN > 0);
assertFalse(SMI_MIN + 1 > 1);
assertFalse(SMI_MIN + 1 > 2);
assertFalse(SMI_MIN + 2 > 1);
assertFalse(0 < SMI_MIN);
assertTrue(-1 < SMI_MAX);
assertFalse(SMI_MAX < -1);
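// (Illustrative rationale: these assertions would fail if smi comparison were
// lowered to a plain 32-bit subtraction; e.g. 0 - SMI_MIN wraps to
// -2147483648, whose sign bit would make 0 < SMI_MIN appear true.)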

58
deps/v8/test/mjsunit/regexp.js

@ -589,3 +589,61 @@ assertEquals(0, desc.value);
assertEquals(false, desc.configurable); assertEquals(false, desc.configurable);
assertEquals(false, desc.enumerable); assertEquals(false, desc.enumerable);
assertEquals(true, desc.writable); assertEquals(true, desc.writable);
// Check that end-anchored regexps are optimized correctly.
var re = /(?:a|bc)g$/;
assertTrue(re.test("ag"));
assertTrue(re.test("bcg"));
assertTrue(re.test("abcg"));
assertTrue(re.test("zimbag"));
assertTrue(re.test("zimbcg"));
assertFalse(re.test("g"));
assertFalse(re.test(""));
// Global regexp (non-zero start).
var re = /(?:a|bc)g$/g;
assertTrue(re.test("ag"));
re.lastIndex = 1; // Near start of string.
assertTrue(re.test("zimbag"));
re.lastIndex = 6; // At end of string.
assertFalse(re.test("zimbag"));
re.lastIndex = 5; // Near end of string.
assertFalse(re.test("zimbag"));
re.lastIndex = 4;
assertTrue(re.test("zimbag"));
// Anchored at both ends.
var re = /^(?:a|bc)g$/g;
assertTrue(re.test("ag"));
re.lastIndex = 1;
assertFalse(re.test("ag"));
re.lastIndex = 1;
assertFalse(re.test("zag"));
// Long max_length of RegExp.
var re = /VeryLongRegExp!{1,1000}$/;
assertTrue(re.test("BahoolaVeryLongRegExp!!!!!!"));
assertFalse(re.test("VeryLongRegExp"));
assertFalse(re.test("!"));
// End anchor inside disjunction.
var re = /(?:a$|bc$)/;
assertTrue(re.test("a"));
assertTrue(re.test("bc"));
assertTrue(re.test("abc"));
assertTrue(re.test("zimzamzumba"));
assertTrue(re.test("zimzamzumbc"));
assertFalse(re.test("c"));
assertFalse(re.test(""));
// Only partially anchored.
var re = /(?:a|bc$)/;
assertTrue(re.test("a"));
assertTrue(re.test("bc"));
assertEquals(["a"], re.exec("abc"));
assertEquals(4, re.exec("zimzamzumba").index);
assertEquals(["bc"], re.exec("zimzomzumbc"));
assertFalse(re.test("c"));
assertFalse(re.test(""));
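// Illustrative sketch of the assumed mechanics behind the tests above: for an
// end-anchored pattern the engine only needs to attempt matches within
// max_length characters of the end of the subject.
var subject = "zimbag";
var maxLength = 3; // longest possible match of /(?:a|bc)g$/ is "bcg"
assertEquals(3, subject.length - maxLength); // positions 0..2 need no attempt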

15
deps/v8/test/mjsunit/smi-ops.js

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
const SMI_MAX = (1 << 30) - 1; const SMI_MAX = (1 << 29) - 1 + (1 << 29); // Create without overflowing.
const SMI_MIN = -(1 << 30); const SMI_MIN = -SMI_MAX - 1; // Create without overflowing.
const ONE = 1; const ONE = 1;
const ONE_HUNDRED = 100; const ONE_HUNDRED = 100;
@ -213,6 +213,15 @@ v = SMI_MIN;
v--; v--;
assertEquals(-0x40000001, v, "smimin--"); assertEquals(-0x40000001, v, "smimin--");
// Check that comparisons of numbers separated by MIN_SMI work.
assertFalse(SMI_MIN > 0);
assertFalse(SMI_MIN + 1 > 1);
assertFalse(SMI_MIN + 1 > 2);
assertFalse(SMI_MIN + 2 > 1);
assertFalse(0 < SMI_MIN);
assertTrue(-1 < SMI_MAX);
assertFalse(SMI_MAX < -1);
// Not actually Smi operations. // Not actually Smi operations.
// Check that relations on unary ops work. // Check that relations on unary ops work.
var v = -1.2; var v = -1.2;

1
deps/v8/tools/gyp/v8.gyp

@ -476,7 +476,6 @@
'../../src/virtual-frame.cc', '../../src/virtual-frame.cc',
'../../src/virtual-frame.h', '../../src/virtual-frame.h',
'../../src/vm-state-inl.h', '../../src/vm-state-inl.h',
'../../src/vm-state.cc',
'../../src/vm-state.h', '../../src/vm-state.h',
'../../src/zone-inl.h', '../../src/zone-inl.h',
'../../src/zone.cc', '../../src/zone.cc',

955
deps/v8/tools/ll_prof.py

@ -0,0 +1,955 @@
#!/usr/bin/env python
#
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bisect
import collections
import ctypes
import mmap
import optparse
import os
import re
import subprocess
import sys
import tempfile
import time
USAGE="""usage: %prog [OPTION]...
Analyses V8 and perf logs to produce profiles.
Perf logs can be collected using a command like:
$ perf record -R -e cycles -c 10000 -f -i ./shell bench.js --ll-prof
# -R: collect all data
# -e cycles: use cpu-cycles event (run "perf list" for details)
# -c 10000: write a sample after each 10000 events
# -f: force output file overwrite
# -i: limit profiling to our process and the kernel
# --ll-prof shell flag enables the right V8 logs
This will produce a binary trace file (perf.data) that %prog can analyse.
Examples:
# Print flat profile with annotated disassembly for the 10 top
# symbols. Use default log names and include the snapshot log.
$ %prog --snapshot --disasm-top=10
# Print flat profile with annotated disassembly for all used symbols.
# Use default log names and include kernel symbols into analysis.
$ %prog --disasm-all --kernel
# Print flat profile. Use custom log names.
$ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
"""
# Must match kGcFakeMmap.
V8_GC_FAKE_MMAP = "/tmp/__v8_gc__"
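# (V8 running with --ll-prof mmaps this fake file around code-moving GCs, so
# the perf event stream carries a marker telling us when to re-read the code
# log; see the PERF_RECORD_MMAP handling in the main loop below.)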
JS_ORIGIN = "js"
JS_SNAPSHOT_ORIGIN = "js-snapshot"
# Avoid using the slow (google-specific) wrapper around objdump.
OBJDUMP_BIN = "/usr/bin/objdump"
if not os.path.exists(OBJDUMP_BIN):
OBJDUMP_BIN = "objdump"
class Code(object):
"""Code object."""
_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"]
_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$")
_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):.*")
# Keys must match constants in Logger::LogCodeInfo.
_ARCH_MAP = {
"ia32": "-m i386",
"x64": "-m i386 -M x86-64",
"arm": "-m arm" # Not supported by our objdump build.
}
_id = 0
def __init__(self, name, start_address, end_address, origin, origin_offset):
self.id = Code._id
Code._id += 1
self.name = name
self.other_names = None
self.start_address = start_address
self.end_address = end_address
self.origin = origin
self.origin_offset = origin_offset
self.self_ticks = 0
self.self_ticks_map = None
self.callee_ticks = None
def AddName(self, name):
assert self.name != name
if self.other_names is None:
self.other_names = [name]
return
if not name in self.other_names:
self.other_names.append(name)
def FullName(self):
if self.other_names is None:
return self.name
self.other_names.sort()
return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
def IsUsed(self):
return self.self_ticks > 0 or self.callee_ticks is not None
def Tick(self, pc):
self.self_ticks += 1
if self.self_ticks_map is None:
self.self_ticks_map = collections.defaultdict(lambda: 0)
offset = pc - self.start_address
self.self_ticks_map[offset] += 1
def CalleeTick(self, callee):
if self.callee_ticks is None:
self.callee_ticks = collections.defaultdict(lambda: 0)
self.callee_ticks[callee] += 1
def PrintAnnotated(self, code_info, options):
if self.self_ticks_map is None:
ticks_map = []
else:
ticks_map = self.self_ticks_map.items()
# Convert the ticks map to offsets and counts arrays so that later
# we can do binary search in the offsets array.
ticks_map.sort(key=lambda t: t[0])
ticks_offsets = [t[0] for t in ticks_map]
ticks_counts = [t[1] for t in ticks_map]
# Get a list of disassembled lines and their addresses.
lines = []
for line in self._GetDisasmLines(code_info, options):
match = Code._DISASM_LINE_RE.match(line)
if match:
line_address = int(match.group(1), 16)
lines.append((line_address, line))
if len(lines) == 0:
return
# Print annotated lines.
address = lines[0][0]
total_count = 0
for i in xrange(len(lines)):
start_offset = lines[i][0] - address
if i == len(lines) - 1:
end_offset = self.end_address - self.start_address
else:
end_offset = lines[i + 1][0] - address
# Ticks (reported pc values) are not always precise, i.e. they do not
# necessarily point at instruction starts, so we have to search
# for ticks that touch the current instruction line.
j = bisect.bisect_left(ticks_offsets, end_offset)
count = 0
for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
if offset < start_offset:
break
count += cnt
total_count += count
count = 100.0 * count / self.self_ticks
if count >= 0.01:
print "%15.2f %s" % (count, lines[i][1])
else:
print "%s %s" % (" " * 15, lines[i][1])
print
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
def __str__(self):
return "%s [0x%x, 0x%x) size: %d origin: %s" % (
self.name,
self.start_address,
self.end_address,
self.end_address - self.start_address,
self.origin)
def _GetDisasmLines(self, code_info, options):
tmp_name = None
if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
assert code_info.arch in Code._ARCH_MAP, \
"Unsupported architecture '%s'" % code_info.arch
arch_flags = Code._ARCH_MAP[code_info.arch]
# Create a temporary file just with this code object.
tmp_name = tempfile.mktemp(".v8code")
size = self.end_address - self.start_address
command = "dd if=%s.code of=%s bs=1 count=%d skip=%d && " \
"%s %s -D -b binary %s %s" % (
options.log, tmp_name, size, self.origin_offset,
OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS), arch_flags,
tmp_name)
else:
command = "%s %s --start-address=%d --stop-address=%d -d %s " % (
OBJDUMP_BIN, ' '.join(Code._COMMON_DISASM_OPTIONS),
self.origin_offset,
self.origin_offset + self.end_address - self.start_address,
self.origin)
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = process.communicate()
lines = out.split("\n")
header_line = 0
for i, line in enumerate(lines):
if Code._DISASM_HEADER_RE.match(line):
header_line = i
break
if tmp_name:
os.unlink(tmp_name)
return lines[header_line + 1:]
class CodePage(object):
"""Group of adjacent code objects."""
SHIFT = 12 # 4K pages
SIZE = (1 << SHIFT)
MASK = ~(SIZE - 1)
@staticmethod
def PageAddress(address):
return address & CodePage.MASK
@staticmethod
def PageId(address):
return address >> CodePage.SHIFT
@staticmethod
def PageAddressFromId(id):
return id << CodePage.SHIFT
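# Worked example (illustrative): with 4K pages, PageAddress(0x12345) is
# 0x12000 and PageId(0x12345) is 0x12, so a code object spanning
# [0x12f00, 0x13100) is registered on the two pages 0x12 and 0x13.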
def __init__(self, address):
self.address = address
self.code_objects = []
def Add(self, code):
self.code_objects.append(code)
def Remove(self, code):
self.code_objects.remove(code)
def Find(self, pc):
code_objects = self.code_objects
for i, code in enumerate(code_objects):
if code.start_address <= pc < code.end_address:
code_objects[0], code_objects[i] = code, code_objects[0]
return code
return None
def __iter__(self):
return self.code_objects.__iter__()
class CodeMap(object):
"""Code object map."""
def __init__(self):
self.pages = {}
self.min_address = 1 << 64
self.max_address = -1
def Add(self, code, max_pages=-1):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
pages = 0
while page_id < limit_id:
if max_pages >= 0 and pages > max_pages:
print >>sys.stderr, \
"Warning: page limit (%d) reached for %s [%s]" % (
max_pages, code.name, code.origin)
break
if page_id in self.pages:
page = self.pages[page_id]
else:
page = CodePage(CodePage.PageAddressFromId(page_id))
self.pages[page_id] = page
page.Add(code)
page_id += 1
pages += 1
self.min_address = min(self.min_address, code.start_address)
self.max_address = max(self.max_address, code.end_address)
def Remove(self, code):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
removed = False
while page_id < limit_id:
if page_id not in self.pages:
page_id += 1
continue
page = self.pages[page_id]
page.Remove(code)
removed = True
page_id += 1
return removed
def AllCode(self):
for page in self.pages.itervalues():
for code in page:
if CodePage.PageAddress(code.start_address) == page.address:
yield code
def UsedCode(self):
for code in self.AllCode():
if code.IsUsed():
yield code
def Print(self):
for code in self.AllCode():
print code
def Find(self, pc):
if pc < self.min_address or pc >= self.max_address:
return None
page_id = CodePage.PageId(pc)
if page_id not in self.pages:
return None
return self.pages[page_id].Find(pc)
class CodeInfo(object):
"""Generic info about generated code objects."""
def __init__(self, arch, header_size):
self.arch = arch
self.header_size = header_size
class CodeLogReader(object):
"""V8 code event log reader."""
_CODE_INFO_RE = re.compile(
r"code-info,([^,]+),(\d+)")
_CODE_CREATE_RE = re.compile(
r"code-creation,([^,]+),(0x[a-f0-9]+),(\d+),\"([^\"]*)\"(?:,(\d+))?")
_CODE_MOVE_RE = re.compile(
r"code-move,(0x[a-f0-9]+),(0x[a-f0-9]+)")
_CODE_DELETE_RE = re.compile(
r"code-delete,(0x[a-f0-9]+)")
_SNAPSHOT_POS_RE = re.compile(
r"snapshot-pos,(0x[a-f0-9]+),(\d+)")
_CODE_MOVING_GC = "code-moving-gc"
def __init__(self, log_name, code_map, is_snapshot, snapshot_pos_to_name):
self.log = open(log_name, "r")
self.code_map = code_map
self.is_snapshot = is_snapshot
self.snapshot_pos_to_name = snapshot_pos_to_name
self.address_to_snapshot_name = {}
def ReadCodeInfo(self):
line = self.log.readline() or ""
match = CodeLogReader._CODE_INFO_RE.match(line)
assert match, "No code info in log"
return CodeInfo(arch=match.group(1), header_size=int(match.group(2)))
def ReadUpToGC(self, code_info):
made_progress = False
code_header_size = code_info.header_size
while True:
line = self.log.readline()
if not line:
return made_progress
made_progress = True
if line.startswith(CodeLogReader._CODE_MOVING_GC):
self.address_to_snapshot_name.clear()
return made_progress
match = CodeLogReader._CODE_CREATE_RE.match(line)
if match:
start_address = int(match.group(2), 16) + code_header_size
end_address = start_address + int(match.group(3)) - code_header_size
if start_address in self.address_to_snapshot_name:
name = self.address_to_snapshot_name[start_address]
origin = JS_SNAPSHOT_ORIGIN
else:
name = "%s:%s" % (match.group(1), match.group(4))
origin = JS_ORIGIN
if self.is_snapshot:
origin_offset = 0
else:
origin_offset = int(match.group(5))
code = Code(name, start_address, end_address, origin, origin_offset)
conflicting_code = self.code_map.Find(start_address)
if conflicting_code:
CodeLogReader._HandleCodeConflict(conflicting_code, code)
# TODO(vitalyr): this warning is too noisy because of our
# attempts to reconstruct code log from the snapshot.
# print >>sys.stderr, \
# "Warning: Skipping duplicate code log entry %s" % code
continue
self.code_map.Add(code)
continue
match = CodeLogReader._CODE_MOVE_RE.match(line)
if match:
old_start_address = int(match.group(1), 16) + code_header_size
new_start_address = int(match.group(2), 16) + code_header_size
if old_start_address == new_start_address:
# Skip useless code move entries.
continue
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact move address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
size = code.end_address - code.start_address
code.start_address = new_start_address
code.end_address = new_start_address + size
self.code_map.Add(code)
continue
match = CodeLogReader._CODE_DELETE_RE.match(line)
if match:
old_start_address = int(match.group(1), 16) + code_header_size
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact delete address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
continue
match = CodeLogReader._SNAPSHOT_POS_RE.match(line)
if match:
start_address = int(match.group(1), 16) + code_header_size
snapshot_pos = int(match.group(2))
if self.is_snapshot:
code = self.code_map.Find(start_address)
if code:
assert code.start_address == start_address, \
"Inexact snapshot address %x for %s" % (start_address, code)
self.snapshot_pos_to_name[snapshot_pos] = code.name
else:
if snapshot_pos in self.snapshot_pos_to_name:
self.address_to_snapshot_name[start_address] = \
self.snapshot_pos_to_name[snapshot_pos]
def Dispose(self):
self.log.close()
@staticmethod
def _HandleCodeConflict(old_code, new_code):
assert (old_code.start_address == new_code.start_address and
old_code.end_address == new_code.end_address), \
"Conficting code log entries %s and %s" % (old_code, new_code)
CodeLogReader._UpdateNames(old_code, new_code)
@staticmethod
def _UpdateNames(old_code, new_code):
if old_code.name == new_code.name:
return
# Kludge: there are code objects with custom names that don't
# match their flags.
misnamed_code = set(["Builtin:CpuFeatures::Probe"])
if old_code.name in misnamed_code:
return
# Code object may be shared by a few functions. Collect the full
# set of names.
old_code.AddName(new_code.name)
class Descriptor(object):
"""Descriptor of a structure in the binary trace log."""
CTYPE_MAP = {
"u16": ctypes.c_uint16,
"u32": ctypes.c_uint32,
"u64": ctypes.c_uint64
}
def __init__(self, fields):
class TraceItem(ctypes.Structure):
_fields_ = Descriptor.CtypesFields(fields)
def __str__(self):
return ", ".join("%s: %s" % (field, self.__getattribute__(field))
for field, _ in TraceItem._fields_)
self.ctype = TraceItem
def Read(self, trace, offset):
return self.ctype.from_buffer(trace, offset)
@staticmethod
def CtypesFields(fields):
return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
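# Illustrative use (hypothetical names): EXAMPLE_DESC = Descriptor([("pid",
# "u32"), ("time", "u64")]) builds a ctypes.Structure subclass, and
# EXAMPLE_DESC.Read(trace, offset) overlays it directly on the mmap-ed
# buffer, so reading fields copies nothing.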
# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
# for the gory details.
TRACE_HEADER_DESC = Descriptor([
("magic", "u64"),
("size", "u64"),
("attr_size", "u64"),
("attrs_offset", "u64"),
("attrs_size", "u64"),
("data_offset", "u64"),
("data_size", "u64"),
("event_types_offset", "u64"),
("event_types_size", "u64")
])
PERF_EVENT_ATTR_DESC = Descriptor([
("type", "u32"),
("size", "u32"),
("config", "u64"),
("sample_period_or_freq", "u64"),
("sample_type", "u64"),
("read_format", "u64"),
("flags", "u64"),
("wakeup_events_or_watermark", "u32"),
("bt_type", "u32"),
("bp_addr", "u64"),
("bp_len", "u64"),
])
PERF_EVENT_HEADER_DESC = Descriptor([
("type", "u32"),
("misc", "u16"),
("size", "u16")
])
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
("addr", "u64"),
("len", "u64"),
("pgoff", "u64")
])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
PERF_SAMPLE_IP = 1 << 0
PERF_SAMPLE_TID = 1 << 1
PERF_SAMPLE_TIME = 1 << 2
PERF_SAMPLE_ADDR = 1 << 3
PERF_SAMPLE_READ = 1 << 4
PERF_SAMPLE_CALLCHAIN = 1 << 5
PERF_SAMPLE_ID = 1 << 6
PERF_SAMPLE_CPU = 1 << 7
PERF_SAMPLE_PERIOD = 1 << 8
PERF_SAMPLE_STREAM_ID = 1 << 9
PERF_SAMPLE_RAW = 1 << 10
PERF_SAMPLE_EVENT_BODY_FIELDS = [
("ip", "u64", PERF_SAMPLE_IP),
("pid", "u32", PERF_SAMPLE_TID),
("tid", "u32", PERF_SAMPLE_TID),
("time", "u64", PERF_SAMPLE_TIME),
("addr", "u64", PERF_SAMPLE_ADDR),
("id", "u64", PERF_SAMPLE_ID),
("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
("cpu", "u32", PERF_SAMPLE_CPU),
("res", "u32", PERF_SAMPLE_CPU),
("period", "u64", PERF_SAMPLE_PERIOD),
# We don't handle the read format, which comes after the period and
# before the callchain and has a variable size.
("nr", "u64", PERF_SAMPLE_CALLCHAIN)
# Raw data follows the callchain and is ignored.
]
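# Example (illustrative): a trace recorded with sample_type equal to
# PERF_SAMPLE_IP | PERF_SAMPLE_TID yields sample bodies holding only the ip,
# pid and tid fields; _SampleEventBodyDesc below selects exactly those.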
PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
PERF_RECORD_SAMPLE = 9
class TraceReader(object):
"""Perf (linux-2.6/tools/perf) trace file reader."""
_TRACE_HEADER_MAGIC = 4993446653023372624
def __init__(self, trace_name):
self.trace_file = open(trace_name, "r")
self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
print >>sys.stderr, "Warning: unsupported trace header magic"
self.offset = self.trace_header.data_offset
self.limit = self.trace_header.data_offset + self.trace_header.data_size
assert self.limit <= self.trace.size(), \
"Trace data limit exceeds trace file size"
self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
assert self.trace_header.attrs_size != 0, \
"No perf event attributes found in the trace"
perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
self.trace_header.attrs_offset)
self.sample_event_body_desc = self._SampleEventBodyDesc(
perf_event_attr.sample_type)
self.callchain_supported = \
(perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
if self.callchain_supported:
self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
self.ip_size = ctypes.sizeof(self.ip_struct)
def ReadEventHeader(self):
if self.offset >= self.limit:
return None, 0
offset = self.offset
header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
self.offset += header.size
return header, offset
def ReadMmap(self, header, offset):
mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
offset + self.header_size)
# Read null-padded filename.
filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
offset + header.size].rstrip(chr(0))
mmap_info.filename = filename
return mmap_info
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
if not self.callchain_supported:
return sample
sample.ips = []
offset += self.header_size + ctypes.sizeof(sample)
for _ in xrange(sample.nr):
sample.ips.append(
self.ip_struct.from_buffer(self.trace, offset).value)
offset += self.ip_size
return sample
def Dispose(self):
self.trace.close()
self.trace_file.close()
def _SampleEventBodyDesc(self, sample_type):
assert (sample_type & PERF_SAMPLE_READ) == 0, \
"Can't hande read format in samples"
fields = [(field, format)
for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
if (bit & sample_type) != 0]
return Descriptor(fields)
OBJDUMP_SECTION_HEADER_RE = re.compile(
r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
OBJDUMP_SYMBOL_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
r"^DYNAMIC SYMBOL TABLE")
KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
PERF_KERNEL_ALLSYMS_RE = re.compile(
r".*kallsyms.*")
KERNEL_ALLSYMS_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
class LibraryRepo(object):
def __init__(self):
self.infos = []
self.names = set()
self.ticks = {}
def Load(self, mmap_info, code_map, options):
# Skip kernel mmaps unless kernel symbols were explicitly requested;
# kernel mmaps are identified by their tid being 0.
if mmap_info.tid == 0 and not options.kernel:
return True
if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
return self._LoadKernelSymbols(code_map)
self.infos.append(mmap_info)
mmap_info.ticks = 0
mmap_info.unique_name = self._UniqueMmapName(mmap_info)
if not os.path.exists(mmap_info.filename):
return True
# Request section headers (-h), symbols (-t), and dynamic symbols
# (-T) from objdump.
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
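# Typical objdump -h output looks like (illustrative):
#   1 .text   0001f2c0  0000000000400000  ...
#            CONTENTS, ALLOC, LOAD, READONLY, CODE
# so the CODE/RELOC flags only appear on the second line of each entry.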
process = subprocess.Popen(
"%s -h -t -T -C %s" % (OBJDUMP_BIN, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pipe = process.stdout
after_section = None
code_sections = set()
reloc_sections = set()
dynamic = False
try:
for line in pipe:
if after_section:
if line.find("CODE") != -1:
code_sections.add(after_section)
if line.find("RELOC") != -1:
reloc_sections.add(after_section)
after_section = None
continue
match = OBJDUMP_SECTION_HEADER_RE.match(line)
if match:
after_section = match.group(1)
continue
if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
dynamic = True
continue
match = OBJDUMP_SYMBOL_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
origin_offset = start_address
flags = match.group(2)
section = match.group(3)
if section in code_sections:
if dynamic or section in reloc_sections:
start_address += mmap_info.addr
size = int(match.group(4), 16)
name = match.group(5)
origin = mmap_info.filename
code_map.Add(Code(name, start_address, start_address + size,
origin, origin_offset))
finally:
pipe.close()
assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
def Tick(self, pc):
for i, mmap_info in enumerate(self.infos):
if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
mmap_info.ticks += 1
self.infos[0], self.infos[i] = mmap_info, self.infos[0]
return True
return False
def _UniqueMmapName(self, mmap_info):
name = mmap_info.filename
index = 1
while name in self.names:
name = "%s-%d" % (mmap_info.filename, index)
index += 1
self.names.add(name)
return name
def _LoadKernelSymbols(self, code_map):
if not os.path.exists(KERNEL_ALLSYMS_FILE):
print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
return False
kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
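# Typical /proc/kallsyms lines look like "ffffffff81000000 T _text"
# (illustrative); only t/T text symbols match KERNEL_ALLSYMS_LINE_RE, and
# each symbol's end address is inferred from the next symbol's start.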
code = None
for line in kallsyms:
match = KERNEL_ALLSYMS_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
end_address = start_address
name = match.group(2)
if code:
code.end_address = start_address
code_map.Add(code, 16)
code = Code(name, start_address, end_address, "kernel", 0)
return True
def PrintReport(code_map, library_repo, code_info, options):
print "Ticks per symbol:"
used_code = [code for code in code_map.UsedCode()]
used_code.sort(key=lambda x: x.self_ticks, reverse=True)
for i, code in enumerate(used_code):
print "%10d %s [%s]" % (code.self_ticks, code.FullName(), code.origin)
if options.disasm_all or i < options.disasm_top:
code.PrintAnnotated(code_info, options)
print
print "Ticks per library:"
mmap_infos = [m for m in library_repo.infos]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
print "%10d %s" % (mmap_info.ticks, mmap_info.unique_name)
def PrintDot(code_map, options):
print "digraph G {"
for code in code_map.UsedCode():
if code.self_ticks < 10:
continue
print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
if code.callee_ticks:
for callee, ticks in code.callee_ticks.iteritems():
print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
print "}"
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
parser.add_option("--snapshot-log",
default="obj/release/snapshot.log",
help="V8 snapshot log file name [default: %default]")
parser.add_option("--log",
default="v8.log",
help="V8 log file name [default: %default]")
parser.add_option("--snapshot",
default=False,
action="store_true",
help="process V8 snapshot log [default: %default]")
parser.add_option("--trace",
default="perf.data",
help="perf trace file name [default: %default]")
parser.add_option("--kernel",
default=False,
action="store_true",
help="process kernel entries [default: %default]")
parser.add_option("--disasm-top",
default=0,
type="int",
help=("number of top symbols to disassemble and annotate "
"[default: %default]"))
parser.add_option("--disasm-all",
default=False,
action="store_true",
help=("disassemble and annotate all used symbols "
"[default: %default]"))
parser.add_option("--dot",
default=False,
action="store_true",
help="produce dot output (WIP) [default: %default]")
parser.add_option("--quiet", "-q",
default=False,
action="store_true",
help="no auxiliary messages [default: %default]")
options, args = parser.parse_args()
if not options.quiet:
if options.snapshot:
print "V8 logs: %s, %s, %s.code" % (options.snapshot_log,
options.log,
options.log)
else:
print "V8 log: %s, %s.code (no snapshot)" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
# Stats.
events = 0
ticks = 0
missed_ticks = 0
really_missed_ticks = 0
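  # missed_ticks counts samples whose pc is not in V8-generated code;
  # really_missed_ticks counts samples found in neither V8 code nor any
  # loaded library.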
mmap_time = 0
sample_time = 0
# Initialize the log reader and get the code info.
code_map = CodeMap()
snapshot_name_map = {}
log_reader = CodeLogReader(log_name=options.log,
code_map=code_map,
is_snapshot=False,
snapshot_pos_to_name=snapshot_name_map)
code_info = log_reader.ReadCodeInfo()
if not options.quiet:
print "Generated code architecture: %s" % code_info.arch
print
# Process the snapshot log to fill the snapshot name map.
if options.snapshot:
snapshot_log_reader = CodeLogReader(log_name=options.snapshot_log,
code_map=CodeMap(),
is_snapshot=True,
snapshot_pos_to_name=snapshot_name_map)
while snapshot_log_reader.ReadUpToGC(code_info):
pass
# Process the code and trace logs.
library_repo = LibraryRepo()
log_reader.ReadUpToGC(code_info)
trace_reader = TraceReader(options.trace)
while True:
header, offset = trace_reader.ReadEventHeader()
if not header:
break
events += 1
if header.type == PERF_RECORD_MMAP:
start = time.time()
mmap_info = trace_reader.ReadMmap(header, offset)
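      # V8 marks GC pauses in the perf stream by mmapping a fake file;
      # on such an event the code log is replayed up to the next GC so
      # that moved and collected code is accounted for.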
if mmap_info.filename == V8_GC_FAKE_MMAP:
log_reader.ReadUpToGC()
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
sample = trace_reader.ReadSample(header, offset)
code = code_map.Find(sample.ip)
if code:
code.Tick(sample.ip)
else:
missed_ticks += 1
if not library_repo.Tick(sample.ip) and not code:
really_missed_ticks += 1
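      # Walk the callchain from callee to callers, crediting each
      # caller with a callee tick (this feeds the --dot call graph).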
if trace_reader.callchain_supported:
for ip in sample.ips:
caller_code = code_map.Find(ip)
if caller_code:
if code:
caller_code.CalleeTick(code)
code = caller_code
sample_time += time.time() - start
if options.dot:
PrintDot(code_map, options)
else:
PrintReport(code_map, library_repo, code_info, options)
if not options.quiet:
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
print "%10d unaccounted ticks" % really_missed_ticks
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
print "%9.2fs tick processing time" % sample_time
log_reader.Dispose()
trace_reader.Dispose()

6
deps/v8/tools/v8.xcodeproj/project.pbxproj

@@ -220,8 +220,6 @@
 9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
 9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
 9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
-9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA37333116DD9F000C4CD55 /* vm-state.cc */; };
-9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA37333116DD9F000C4CD55 /* vm-state.cc */; };
 9FA38BB31175B2D200C4CD55 /* data-flow.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */; };
 9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38B9E1175B2D200C4CD55 /* diy-fp.cc */; };
 9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */; };
@@ -590,7 +588,6 @@
 9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "func-name-inferrer.h"; sourceTree = "<group>"; };
 9FA36F62116BA26500C4CD55 /* v8-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-profiler.h"; sourceTree = "<group>"; };
 9FA37332116DD9F000C4CD55 /* vm-state-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "vm-state-inl.h"; sourceTree = "<group>"; };
-9FA37333116DD9F000C4CD55 /* vm-state.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "vm-state.cc"; sourceTree = "<group>"; };
 9FA37334116DD9F000C4CD55 /* vm-state.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "vm-state.h"; sourceTree = "<group>"; };
 9FA38B9B1175B2D200C4CD55 /* cached-powers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cached-powers.h"; sourceTree = "<group>"; };
 9FA38B9C1175B2D200C4CD55 /* data-flow.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "data-flow.cc"; sourceTree = "<group>"; };
@@ -1013,7 +1010,6 @@
 58950D5A0F55514900F3E8BA /* virtual-frame.cc */,
 58950D5B0F55514900F3E8BA /* virtual-frame.h */,
 9FA37332116DD9F000C4CD55 /* vm-state-inl.h */,
-9FA37333116DD9F000C4CD55 /* vm-state.cc */,
 9FA37334116DD9F000C4CD55 /* vm-state.h */,
 897FF1A10E719B8F00D62E90 /* zone-inl.h */,
 897FF1A20E719B8F00D62E90 /* zone.cc */,
@@ -1377,7 +1373,6 @@
 9FA38BC71175B2E500C4CD55 /* virtual-frame-ia32.cc in Sources */,
 58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
 58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
-9FA37336116DD9F000C4CD55 /* vm-state.cc in Sources */,
 89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
 C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
 );
@@ -1503,7 +1498,6 @@
 58950D690F5551CE00F3E8BA /* virtual-frame-light.cc in Sources */,
 58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
 58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
-9FA37335116DD9F000C4CD55 /* vm-state.cc in Sources */,
 89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
 C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
 );

4
deps/v8/tools/visual_studio/v8_base.vcproj

@@ -1081,10 +1081,6 @@
 RelativePath="..\..\src\virtual-frame-heavy.cc"
 >
 </File>
-<File
-RelativePath="..\..\src\vm-state.cc"
->
-</File>
 <File
 RelativePath="..\..\src\vm-state-inl.h"
 >

4
deps/v8/tools/visual_studio/v8_base_arm.vcproj

@@ -1059,10 +1059,6 @@
 RelativePath="..\..\src\virtual-frame-light.cc"
 >
 </File>
-<File
-RelativePath="..\..\src\vm-state.cc"
->
-</File>
 <File
 RelativePath="..\..\src\vm-state-inl.h"
 >

4
deps/v8/tools/visual_studio/v8_base_x64.vcproj

@@ -1041,10 +1041,6 @@
 RelativePath="..\..\src\virtual-frame-heavy.cc"
 >
 </File>
-<File
-RelativePath="..\..\src\vm-state.cc"
->
-</File>
 <File
 RelativePath="..\..\src\vm-state-inl.h"
 >
