
Merge branch 'V8-3.4'

Tag: v0.7.4-release
Author: Ryan Dahl
Commit: 5709643289
Changed files (lines changed in parentheses):

1. deps/v8/ChangeLog (33)
2. deps/v8/SConstruct (23)
3. deps/v8/include/v8.h (25)
4. deps/v8/src/SConscript (7)
5. deps/v8/src/api.cc (214)
6. deps/v8/src/arm/code-stubs-arm.cc (85)
7. deps/v8/src/arm/code-stubs-arm.h (57)
8. deps/v8/src/arm/codegen-arm.h (2)
9. deps/v8/src/arm/full-codegen-arm.cc (17)
10. deps/v8/src/arm/ic-arm.cc (99)
11. deps/v8/src/arm/lithium-codegen-arm.cc (8)
12. deps/v8/src/arm/macro-assembler-arm.cc (94)
13. deps/v8/src/arm/macro-assembler-arm.h (10)
14. deps/v8/src/arm/regexp-macro-assembler-arm.h (3)
15. deps/v8/src/arm/stub-cache-arm.cc (55)
16. deps/v8/src/code-stubs.cc (112)
17. deps/v8/src/code-stubs.h (173)
18. deps/v8/src/codegen.cc (4)
19. deps/v8/src/conversions-inl.h (53)
20. deps/v8/src/conversions.cc (20)
21. deps/v8/src/conversions.h (2)
22. deps/v8/src/cpu-profiler-inl.h (4)
23. deps/v8/src/cpu-profiler.cc (12)
24. deps/v8/src/cpu-profiler.h (10)
25. deps/v8/src/d8.cc (421)
26. deps/v8/src/d8.gyp (3)
27. deps/v8/src/d8.h (91)
28. deps/v8/src/debug.cc (7)
29. deps/v8/src/debug.h (3)
30. deps/v8/src/deoptimizer.cc (52)
31. deps/v8/src/deoptimizer.h (36)
32. deps/v8/src/flag-definitions.h (19)
33. deps/v8/src/frames.cc (4)
34. deps/v8/src/frames.h (2)
35. deps/v8/src/full-codegen.h (2)
36. deps/v8/src/handles.cc (5)
37. deps/v8/src/heap-profiler.cc (11)
38. deps/v8/src/heap-profiler.h (8)
39. deps/v8/src/heap.cc (74)
40. deps/v8/src/heap.h (10)
41. deps/v8/src/hydrogen-instructions.cc (15)
42. deps/v8/src/hydrogen-instructions.h (14)
43. deps/v8/src/ia32/code-stubs-ia32.cc (74)
44. deps/v8/src/ia32/code-stubs-ia32.h (53)
45. deps/v8/src/ia32/codegen-ia32.h (2)
46. deps/v8/src/ia32/full-codegen-ia32.cc (18)
47. deps/v8/src/ia32/ic-ia32.cc (106)
48. deps/v8/src/ia32/lithium-codegen-ia32.cc (1)
49. deps/v8/src/ia32/lithium-gap-resolver-ia32.cc (7)
50. deps/v8/src/ia32/macro-assembler-ia32.cc (98)
51. deps/v8/src/ia32/macro-assembler-ia32.h (9)
52. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (3)
53. deps/v8/src/ia32/stub-cache-ia32.cc (69)
54. deps/v8/src/ic.cc (41)
55. deps/v8/src/ic.h (21)
56. deps/v8/src/isolate.cc (9)
57. deps/v8/src/isolate.h (41)
58. deps/v8/src/jsregexp.cc (1)
59. deps/v8/src/jsregexp.h (1)
60. deps/v8/src/lithium-allocator.cc (2)
61. deps/v8/src/log-inl.h (4)
62. deps/v8/src/log-utils.cc (160)
63. deps/v8/src/log-utils.h (105)
64. deps/v8/src/log.cc (132)
65. deps/v8/src/log.h (16)
66. deps/v8/src/mark-compact.cc (7)
67. deps/v8/src/messages.js (1)
68. deps/v8/src/mips/assembler-mips.h (9)
69. deps/v8/src/mips/builtins-mips.cc (5)
70. deps/v8/src/mips/code-stubs-mips.cc (102)
71. deps/v8/src/mips/code-stubs-mips.h (63)
72. deps/v8/src/mips/codegen-mips.h (2)
73. deps/v8/src/mips/full-codegen-mips.cc (67)
74. deps/v8/src/mips/ic-mips.cc (116)
75. deps/v8/src/mips/macro-assembler-mips.cc (536)
76. deps/v8/src/mips/macro-assembler-mips.h (104)
77. deps/v8/src/mips/regexp-macro-assembler-mips.cc (4)
78. deps/v8/src/mips/regexp-macro-assembler-mips.h (7)
79. deps/v8/src/mips/stub-cache-mips.cc (58)
80. deps/v8/src/mksnapshot.cc (3)
81. deps/v8/src/objects-visiting.h (16)
82. deps/v8/src/objects.cc (107)
83. deps/v8/src/objects.h (7)
84. deps/v8/src/parser.cc (36)
85. deps/v8/src/platform-cygwin.cc (23)
86. deps/v8/src/platform-freebsd.cc (21)
87. deps/v8/src/platform-linux.cc (24)
88. deps/v8/src/platform-macos.cc (19)
89. deps/v8/src/platform-nullos.cc (16)
90. deps/v8/src/platform-openbsd.cc (21)
91. deps/v8/src/platform-posix.cc (15)
92. deps/v8/src/platform-solaris.cc (21)
93. deps/v8/src/platform-tls.h (2)
94. deps/v8/src/platform-win32.cc (50)
95. deps/v8/src/platform.h (12)
96. deps/v8/src/profile-generator-inl.h (4)
97. deps/v8/src/profile-generator.cc (4)
98. deps/v8/src/profile-generator.h (4)
99. deps/v8/src/property.h (1)
100. deps/v8/src/proxy.js (12)

deps/v8/ChangeLog (33)

@ -1,3 +1,36 @@
2011-07-13: Version 3.4.12

        Added --prof profiling option to d8 shell.
        Fixed a bug where reading a directory in d8 shell hangs (issue 1533).
        Fixed a potential assertion failure in const declarations.
        Fixed an assertion failure in descriptor arrays (issue 1526).
        Enabled fast thread-local storage by default on supported platforms.
        Improved reporting of source position for global variable loads
        (issue 1527).

2011-07-11: Version 3.4.11

        Fixed MinGW32 build.
        Fixed a GC bug with RegExp code flushing.
        Implemented Object.defineProperty for proxies.
        Fixed a bug in for/in iteration of arguments objects (issue 1531).
        Added debugger support for inspecting optimized frames (issue 1140).
        Allowed JSObject::PreventExtensions to work for arguments objects.
        Bugfixes and performance work.

2011-07-06: Version 3.4.10

        Fixed debugger not breaking on certain "if" statements (issue 1523).

deps/v8/SConstruct (23)

@ -60,26 +60,17 @@ LIBRARY_FLAGS = {
'mode:debug': {
'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT']
},
'vmstate:on': {
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING'],
},
'objectprint:on': {
'CPPDEFINES': ['OBJECT_PRINT'],
},
'protectheap:on': {
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_HEAP_PROTECTION'],
},
'profilingsupport:on': {
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_LOGGING_AND_PROFILING'],
},
'debuggersupport:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
},
'inspector:on': {
'CPPDEFINES': ['INSPECTOR'],
},
'fasttls:on': {
'CPPDEFINES': ['V8_FAST_TLS'],
'fasttls:off': {
'CPPDEFINES': ['V8_NO_FAST_TLS'],
},
'liveobjectlist:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
@ -929,21 +920,11 @@ SIMPLE_OPTIONS = {
'default': 'static',
'help': 'the type of library to produce'
},
'vmstate': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable VM state tracking'
},
'objectprint': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable object printing'
},
'protectheap': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable heap protection'
},
'profilingsupport': {
'values': ['on', 'off'],
'default': 'on',

deps/v8/include/v8.h (25)

@ -2983,31 +2983,6 @@ class V8EXPORT V8 {
*/
static bool IsProfilerPaused();
/**
* If logging is performed into a memory buffer (via --logfile=*), allows to
* retrieve previously written messages. This can be used for retrieving
* profiler log data in the application. This function is thread-safe.
*
* Caller provides a destination buffer that must exist during GetLogLines
* call. Only whole log lines are copied into the buffer.
*
* \param from_pos specifies a point in a buffer to read from, 0 is the
* beginning of a buffer. It is assumed that caller updates its current
* position using returned size value from the previous call.
* \param dest_buf destination buffer for log data.
* \param max_size size of the destination buffer.
* \returns actual size of log data copied into buffer.
*/
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
/**
* The minimum allowed size for a log lines buffer. If the size of
* the buffer given will not be enough to hold a line of the maximum
* length, an attempt to find a log line end in GetLogLines will
* fail, and an empty result will be returned.
*/
static const int kMinimumSizeForLogLinesBuffer = 2048;
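The comment above fully specifies the polling contract of the API this commit deletes. A minimal sketch of how an embedder could have drained the in-memory log with it, assuming --logfile=* was passed (hypothetical helper, illustrative only):

    #include <v8.h>
    #include <string>

    // Drain whole log lines from V8's in-memory log buffer into a string.
    // Buffer size honors the documented kMinimumSizeForLogLinesBuffer floor.
    std::string DrainLog() {
      std::string log;
      char buf[v8::V8::kMinimumSizeForLogLinesBuffer];
      int pos = 0;
      for (;;) {
        int n = v8::V8::GetLogLines(pos, buf, static_cast<int>(sizeof(buf)));
        if (n == 0) break;   // no complete line left to copy
        log.append(buf, n);  // only whole lines are ever returned
        pos += n;            // caller tracks its own read position
      }
      return log;
    }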
/**
* Retrieve the V8 thread id of the calling thread.
*

deps/v8/src/SConscript (7)

@ -231,15 +231,11 @@ SOURCES = {
PREPARSER_SOURCES = {
'all': Split("""
allocation.cc
bignum.cc
cached-powers.cc
conversions.cc
hashmap.cc
preparse-data.cc
preparser.cc
preparser-api.cc
scanner-base.cc
strtod.cc
token.cc
unicode.cc
utils.cc
@ -317,10 +313,7 @@ def ConfigureObjectFiles():
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
if 'ENABLE_LOGGING_AND_PROFILING' in env['CPPDEFINES']:
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
else:
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET')
def BuildJS2CEnv(type):
js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }

deps/v8/src/api.cc (214)

@ -54,16 +54,11 @@
#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
#ifdef ENABLE_VMSTATE_TRACKING
#define ENTER_V8(isolate) \
ASSERT((isolate)->IsInitialized()); \
i::VMState __state__((isolate), i::OTHER)
#define LEAVE_V8(isolate) \
i::VMState __state__((isolate), i::EXTERNAL)
#else
#define ENTER_V8(isolate) ((void) 0)
#define LEAVE_V8(isolate) ((void) 0)
#endif
namespace v8 {
@ -114,9 +109,7 @@ namespace v8 {
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
#ifdef ENABLE_VMSTATE_TRACKING
i::VMState __state__(i::Isolate::Current(), i::OTHER);
#endif
API_Fatal(location, message);
}
@ -4832,37 +4825,20 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
void V8::PauseProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
isolate->logger()->PauseProfiler();
#endif
}
void V8::ResumeProfiler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
isolate->logger()->ResumeProfiler();
#endif
}
bool V8::IsProfilerPaused() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
return isolate->logger()->IsProfilerPaused();
#else
return true;
#endif
}
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(max_size >= kMinimumSizeForLogLinesBuffer);
return LOGGER->GetLogLines(from_pos, dest_buf, max_size);
#endif
return 0;
}
@ -5327,7 +5303,6 @@ Local<Context> Debug::GetDebugContext() {
Handle<String> CpuProfileNode::GetFunctionName() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
@ -5340,117 +5315,77 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
isolate->factory()->LookupAsciiSymbol(entry->name()))));
}
#else
return v8::String::Empty();
#endif
}
Handle<String> CpuProfileNode::GetScriptResourceName() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
node->entry()->resource_name())));
#else
return v8::String::Empty();
#endif
}
int CpuProfileNode::GetLineNumber() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
#else
return 0;
#endif
}
double CpuProfileNode::GetTotalTime() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis();
#else
return 0.0;
#endif
}
double CpuProfileNode::GetSelfTime() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime");
return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis();
#else
return 0.0;
#endif
}
double CpuProfileNode::GetTotalSamplesCount() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
#else
return 0.0;
#endif
}
double CpuProfileNode::GetSelfSamplesCount() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
#else
return 0.0;
#endif
}
unsigned CpuProfileNode::GetCallUid() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
#else
return 0;
#endif
}
int CpuProfileNode::GetChildrenCount() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
#else
return 0;
#endif
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
#else
return NULL;
#endif
}
void CpuProfile::Delete() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::Delete");
i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
@ -5459,153 +5394,109 @@ void CpuProfile::Delete() {
// If this was the last profile, clean up all accessory data as well.
i::CpuProfiler::DeleteAllProfiles();
}
#endif
}
unsigned CpuProfile::GetUid() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
#else
return 0;
#endif
}
Handle<String> CpuProfile::GetTitle() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
profile->title())));
#else
return v8::String::Empty();
#endif
}
const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
#else
return NULL;
#endif
}
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
#else
return NULL;
#endif
}
int CpuProfiler::GetProfilesCount() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount");
return i::CpuProfiler::GetProfilesCount();
#else
return 0;
#endif
}
const CpuProfile* CpuProfiler::GetProfile(int index,
Handle<Value> security_token) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::GetProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
index));
#else
return NULL;
#endif
}
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
Handle<Value> security_token) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::FindProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
uid));
#else
return NULL;
#endif
}
void CpuProfiler::StartProfiling(Handle<String> title) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling");
i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
#endif
}
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
Handle<Value> security_token) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::StopProfiling(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
*Utils::OpenHandle(*title)));
#else
return NULL;
#endif
}
void CpuProfiler::DeleteAllProfiles() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
i::CpuProfiler::DeleteAllProfiles();
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
return const_cast<i::HeapGraphEdge*>(
reinterpret_cast<const i::HeapGraphEdge*>(edge));
}
#endif
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
#else
return static_cast<HeapGraphEdge::Type>(0);
#endif
}
Handle<Value> HeapGraphEdge::GetName() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
i::HeapGraphEdge* edge = ToInternal(this);
@ -5622,166 +5513,112 @@ Handle<Value> HeapGraphEdge::GetName() const {
edge->index())));
default: UNREACHABLE();
}
#endif
return v8::Undefined();
}
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
const i::HeapEntry* from = ToInternal(this)->From();
return reinterpret_cast<const HeapGraphNode*>(from);
#else
return NULL;
#endif
}
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
#else
return NULL;
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(entry));
}
#endif
HeapGraphNode::Type HeapGraphNode::GetType() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
#else
return static_cast<HeapGraphNode::Type>(0);
#endif
}
Handle<String> HeapGraphNode::GetName() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
ToInternal(this)->name())));
#else
return v8::String::Empty();
#endif
}
uint64_t HeapGraphNode::GetId() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
return ToInternal(this)->id();
#else
return 0;
#endif
}
int HeapGraphNode::GetSelfSize() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
return ToInternal(this)->self_size();
#else
return 0;
#endif
}
int HeapGraphNode::GetRetainedSize(bool exact) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
return ToInternal(this)->RetainedSize(exact);
#else
return 0;
#endif
}
int HeapGraphNode::GetChildrenCount() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
return ToInternal(this)->children().length();
#else
return 0;
#endif
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
return reinterpret_cast<const HeapGraphEdge*>(
&ToInternal(this)->children()[index]);
#else
return NULL;
#endif
}
int HeapGraphNode::GetRetainersCount() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount");
return ToInternal(this)->retainers().length();
#else
return 0;
#endif
}
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer");
return reinterpret_cast<const HeapGraphEdge*>(
ToInternal(this)->retainers()[index]);
#else
return NULL;
#endif
}
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator());
#else
return NULL;
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
}
#endif
void HeapSnapshot::Delete() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
if (i::HeapProfiler::GetSnapshotsCount() > 1) {
@ -5790,93 +5627,63 @@ void HeapSnapshot::Delete() {
// If this is the last snapshot, clean up all accessory data as well.
i::HeapProfiler::DeleteAllSnapshots();
}
#endif
}
HeapSnapshot::Type HeapSnapshot::GetType() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetType");
return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
#else
return static_cast<HeapSnapshot::Type>(0);
#endif
}
unsigned HeapSnapshot::GetUid() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
return ToInternal(this)->uid();
#else
return 0;
#endif
}
Handle<String> HeapSnapshot::GetTitle() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
ToInternal(this)->title())));
#else
return v8::String::Empty();
#endif
}
const HeapGraphNode* HeapSnapshot::GetRoot() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
#else
return 0;
#endif
}
const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
#else
return NULL;
#endif
}
int HeapSnapshot::GetNodesCount() const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
return ToInternal(this)->entries()->length();
#else
return 0;
#endif
}
const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->entries()->at(index));
#else
return 0;
#endif
}
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
ApiCheck(format == kJSON,
@ -5890,49 +5697,35 @@ void HeapSnapshot::Serialize(OutputStream* stream,
"Invalid stream chunk size");
i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
serializer.Serialize(stream);
#endif
}
int HeapProfiler::GetSnapshotsCount() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount");
return i::HeapProfiler::GetSnapshotsCount();
#else
return 0;
#endif
}
const HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::GetSnapshot(index));
#else
return NULL;
#endif
}
const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot");
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::FindSnapshot(uid));
#else
return NULL;
#endif
}
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
ActivityControl* control) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
@ -5946,27 +5739,20 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(
*Utils::OpenHandle(*title), internal_type, control));
#else
return NULL;
#endif
}
void HeapProfiler::DeleteAllSnapshots() {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
i::HeapProfiler::DeleteAllSnapshots();
#endif
}
void HeapProfiler::DefineWrapperClass(uint16_t class_id,
WrapperInfoCallback callback) {
#ifdef ENABLE_LOGGING_AND_PROFILING
i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id,
callback);
#endif
}
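Every hunk in this file follows the same mechanical rewrite: with logging and profiling support now compiled in unconditionally, the #ifdef ENABLE_LOGGING_AND_PROFILING guard and its dummy-value #else branch are dropped, leaving only the real body. Schematically, with a hypothetical stand-in for the accessors above:

    #include <cassert>

    namespace before {  // pre-merge shape of each profiler accessor
    int GetLineNumber(int real_value) {
    #ifdef ENABLE_LOGGING_AND_PROFILING
      return real_value;  // real implementation
    #else
      return 0;           // dummy when profiling was compiled out
    #endif
    }
    }  // namespace before

    namespace after {   // post-merge shape: guard removed
    int GetLineNumber(int real_value) { return real_value; }
    }  // namespace after

    int main() {
      assert(after::GetLineNumber(7) == 7);
      return 0;
    }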

deps/v8/src/arm/code-stubs-arm.cc (85)

@ -304,12 +304,6 @@ class ConvertToDoubleStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "ConvertToDoubleStub"; }
#ifdef DEBUG
void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};
@ -1689,25 +1683,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
const char* UnaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"UnaryOpStub_%s_%s_%s",
stream->Add("UnaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
UnaryOpIC::GetName(operand_type_));
return name_;
}
@ -2043,12 +2029,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) {
}
const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@ -2057,13 +2038,10 @@ const char* BinaryOpStub::GetName() {
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"BinaryOpStub_%s_%s_%s",
stream->Add("BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(operands_type_));
return name_;
}
@ -3568,7 +3546,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Setup frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
@ -3584,7 +3561,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
__ push(ip);
#endif
// Call a faked try-block that does the invoke.
__ bl(&invoke);
@ -3645,7 +3621,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PopTryHandler();
__ bind(&exit); // r0 holds result
#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r5);
@ -3655,7 +3630,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ str(r6, MemOperand(r5));
__ bind(&non_outermost_js_2);
#endif
// Restore the top frame descriptors from the stack.
__ pop(r3);
@ -4755,16 +4729,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
void CompareStub::PrintName(StringStream* stream) {
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
switch (cc_) {
case lt: cc_name = "LT"; break;
@ -4775,40 +4742,14 @@ const char* CompareStub::GetName() {
case ne: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
const char* strict_name = "";
if (strict_ && (cc_ == eq || cc_ == ne)) {
strict_name = "_STRICT";
}
const char* never_nan_nan_name = "";
if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
never_nan_nan_name = "_NO_NAN";
}
const char* include_number_compare_name = "";
if (!include_number_compare_) {
include_number_compare_name = "_NO_NUMBER";
}
const char* include_smi_compare_name = "";
if (!include_smi_compare_) {
include_smi_compare_name = "_NO_SMI";
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"CompareStub_%s%s%s%s%s%s",
cc_name,
lhs_name,
rhs_name,
strict_name,
never_nan_nan_name,
include_number_compare_name,
include_smi_compare_name);
return name_;
bool is_equality = cc_ == eq || cc_ == ne;
stream->Add("CompareStub_%s", cc_name);
stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
if (strict_ && is_equality) stream->Add("_STRICT");
if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
if (!include_number_compare_) stream->Add("_NO_NUMBER");
if (!include_smi_compare_) stream->Add("_NO_SMI");
}
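The stub names themselves are unchanged by this rewrite; only the buffer management moves from a lazily allocated, bootstrapper-owned array into a caller-provided StringStream. A standalone sketch of the naming scheme (hypothetical helper, not V8 code), handy when matching names in --prof output:

    #include <string>

    // Rebuilds the flat name CompareStub::PrintName streams above. The stub
    // asserts lhs/rhs are (r0, r1) or (r1, r0), so rhs is derived from lhs.
    std::string CompareStubName(const char* cc_name, bool lhs_is_r0,
                                bool is_equality, bool strict,
                                bool never_nan_nan,
                                bool include_number_compare,
                                bool include_smi_compare) {
      std::string name = std::string("CompareStub_") + cc_name;
      name += lhs_is_r0 ? "_r0" : "_r1";
      name += lhs_is_r0 ? "_r1" : "_r0";
      if (strict && is_equality) name += "_STRICT";
      if (never_nan_nan && is_equality) name += "_NO_NAN";
      if (!include_number_compare) name += "_NO_NUMBER";
      if (!include_smi_compare) name += "_NO_SMI";
      return name;  // e.g. "CompareStub_EQ_r0_r1_STRICT"
    }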

deps/v8/src/arm/code-stubs-arm.h (57)

@ -65,8 +65,7 @@ class UnaryOpStub: public CodeStub {
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(operand_type),
name_(NULL) {
operand_type_(operand_type) {
}
private:
@ -76,19 +75,7 @@ class UnaryOpStub: public CodeStub {
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
UnaryOpIC::GetName(operand_type_));
}
#endif
virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@ -142,8 +129,7 @@ class BinaryOpStub: public CodeStub {
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -156,8 +142,7 @@ class BinaryOpStub: public CodeStub {
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) { }
result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@ -173,20 +158,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
BinaryOpIC::GetName(operands_type_));
}
#endif
virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@ -370,12 +342,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
@ -402,8 +368,6 @@ class NumberToStringStub: public CodeStub {
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
};
@ -421,8 +385,6 @@ class RegExpCEntryStub: public CodeStub {
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "RegExpCEntryStub"; }
};
@ -443,8 +405,6 @@ class DirectCEntryStub: public CodeStub {
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "DirectCEntryStub"; }
};
@ -627,13 +587,6 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
#ifdef DEBUG
void Print() {
PrintF("StringDictionaryLookupStub\n");
}
#endif
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {

deps/v8/src/arm/codegen-arm.h (2)

@ -58,9 +58,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,

deps/v8/src/arm/full-codegen-arm.cc (17)

@ -776,7 +776,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(r0);
@ -1113,7 +1113,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var());
EmitVariableLoad(expr);
}
@ -1262,7 +1262,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
}
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Record position before possible IC call.
SetSourcePosition(proxy->position());
Variable* var = proxy->var();
// Three cases: non-this global variables, lookup slots, and all other
// types of slots.
Slot* slot = var->AsSlot();
@ -1593,7 +1597,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@ -2772,13 +2776,12 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
context()->Plug(r0);
@ -3816,7 +3819,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {

deps/v8/src/arm/ic-arm.cc (99)

@ -212,101 +212,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register result,
Register t0,
Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'result'.
// Unchanged on bailout so 'key' or 'result' can be used
// in further computation.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
//
// t1 - used to hold the capacity mask of the dictionary
//
// t2 - used for the index into the dictionary.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
__ mvn(t1, Operand(t0));
__ add(t0, t1, Operand(t0, LSL, 15));
// hash = hash ^ (hash >> 12);
__ eor(t0, t0, Operand(t0, LSR, 12));
// hash = hash + (hash << 2);
__ add(t0, t0, Operand(t0, LSL, 2));
// hash = hash ^ (hash >> 4);
__ eor(t0, t0, Operand(t0, LSR, 4));
// hash = hash * 2057;
__ mov(t1, Operand(2057));
__ mul(t0, t0, t1);
// hash = hash ^ (hash >> 16);
__ eor(t0, t0, Operand(t0, LSR, 16));
// Compute the capacity mask.
__ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
__ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
__ sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
__ mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
__ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
}
__ and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
ASSERT(NumberDictionary::kEntrySize == 3);
__ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
__ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
__ cmp(key, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
} else {
__ b(ne, miss);
}
}
__ bind(&done);
// Check that the value is a normal property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
__ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
__ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ ldr(result, FieldMemOperand(t2, kValueOffset));
}
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@ -738,7 +643,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ b(ne, &slow_load);
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
__ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
@ -1127,7 +1032,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
__ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.

deps/v8/src/arm/lithium-codegen-arm.cc (8)

@ -551,6 +551,13 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
RecordPosition(pointers->position());
__ Call(code, mode);
RegisterLazyDeoptimization(instr, safepoint_mode);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
if (code->kind() == Code::BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
}
@ -1506,6 +1513,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}

deps/v8/src/arm/macro-assembler-arm.cc (94)

@ -1343,6 +1343,100 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register t0,
Register t1,
Register t2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'result'.
// Unchanged on bailout so 'key' or 'result' can be used
// in further computation.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
//
// t1 - used to hold the capacity mask of the dictionary
//
// t2 - used for the index into the dictionary.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
mvn(t1, Operand(t0));
add(t0, t1, Operand(t0, LSL, 15));
// hash = hash ^ (hash >> 12);
eor(t0, t0, Operand(t0, LSR, 12));
// hash = hash + (hash << 2);
add(t0, t0, Operand(t0, LSL, 2));
// hash = hash ^ (hash >> 4);
eor(t0, t0, Operand(t0, LSR, 4));
// hash = hash * 2057;
mov(t1, Operand(2057));
mul(t0, t0, t1);
// hash = hash ^ (hash >> 16);
eor(t0, t0, Operand(t0, LSR, 16));
// Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
sub(t1, t1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use t2 for index calculations and keep the hash intact in t0.
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
}
and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
ASSERT(NumberDictionary::kEntrySize == 3);
add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
if (i != kProbes - 1) {
b(eq, &done);
} else {
b(ne, miss);
}
}
bind(&done);
// Check that the value is a normal property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ldr(t1, FieldMemOperand(t2, kDetailsOffset));
tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
b(ne, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
ldr(result, FieldMemOperand(t2, kValueOffset));
}
void MacroAssembler::AllocateInNewSpace(int object_size,
Register result,
Register scratch1,

deps/v8/src/arm/macro-assembler-arm.h (10)

@ -433,6 +433,16 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* miss);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register t0,
Register t1,
Register t2);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}

deps/v8/src/arm/regexp-macro-assembler-arm.h (3)

@ -28,6 +28,9 @@
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
namespace v8 {
namespace internal {

deps/v8/src/arm/stub-cache-arm.cc (55)

@ -3100,7 +3100,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// -- r1 : receiver
// -----------------------------------
Code* stub;
MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r1,
r2,
@ -3193,7 +3194,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// -- r3 : scratch
// -----------------------------------
Code* stub;
MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r2,
r3,
@ -3388,6 +3392,53 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
#define __ ACCESS_MASM(masm)
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Label slow, miss_force_generic;
Register key = r0;
Register receiver = r1;
__ JumpIfNotSmi(key, &miss_force_generic);
__ mov(r2, Operand(key, ASR, kSmiTagSize));
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
__ Ret();
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, r2, r3);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ Jump(slow_ic, RelocInfo::CODE_TARGET);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
}
static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:

deps/v8/src/code-stubs.cc (112)

@ -61,21 +61,29 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
}
SmartPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
PrintName(&stream);
return stream.ToCString();
}
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
Isolate* isolate = masm->isolate();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
SmartPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
Counters* counters = isolate->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
#ifdef DEBUG
Print();
#endif
code->Disassemble(GetName());
code->Disassemble(*name);
PrintF("\n");
}
#endif
@ -170,7 +178,7 @@ MaybeObject* CodeStub::TryGetCode() {
const char* CodeStub::MajorName(CodeStub::Major major_key,
bool allow_unknown_keys) {
switch (major_key) {
#define DEF_CASE(name) case name: return #name;
#define DEF_CASE(name) case name: return #name "Stub";
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
default:
@ -213,13 +221,7 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
}
const char* InstanceofStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void InstanceofStub::PrintName(StringStream* stream) {
const char* args = "";
if (HasArgsInRegisters()) {
args = "_REGS";
@ -235,33 +237,95 @@ const char* InstanceofStub::GetName() {
return_true_false_object = "_TRUEFALSE";
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"InstanceofStub%s%s%s",
stream->Add("InstanceofStub%s%s%s",
args,
inline_check,
return_true_false_object);
return name_;
}
void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case JSObject::FAST_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
break;
case JSObject::FAST_DOUBLE_ELEMENTS:
UNIMPLEMENTED();
break;
case JSObject::EXTERNAL_BYTE_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case JSObject::EXTERNAL_SHORT_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case JSObject::EXTERNAL_INT_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
case JSObject::EXTERNAL_PIXEL_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
break;
case JSObject::DICTIONARY_ELEMENTS:
KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
break;
case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
switch (elements_kind_) {
case JSObject::FAST_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
break;
case JSObject::FAST_DOUBLE_ELEMENTS:
UNIMPLEMENTED();
break;
case JSObject::EXTERNAL_BYTE_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case JSObject::EXTERNAL_SHORT_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case JSObject::EXTERNAL_INT_ELEMENTS:
case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
case JSObject::EXTERNAL_FLOAT_ELEMENTS:
case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
case JSObject::EXTERNAL_PIXEL_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
break;
case JSObject::DICTIONARY_ELEMENTS:
KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
break;
case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
void ArgumentsAccessStub::PrintName(StringStream* stream) {
const char* type_name = NULL; // Make g++ happy.
switch (type_) {
case READ_ELEMENT: type_name = "ReadElement"; break;
case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
case NEW_STRICT: type_name = "NewStrict"; break;
}
stream->Add("ArgumentsAccessStub_%s", type_name);
}
void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
void CallFunctionStub::PrintName(StringStream* stream) {
const char* in_loop_name = NULL; // Make g++ happy.
switch (in_loop_) {
case NOT_IN_LOOP: in_loop_name = ""; break;
case IN_LOOP: in_loop_name = "_InLoop"; break;
}
const char* flags_name = NULL; // Make g++ happy.
switch (flags_) {
case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
}
stream->Add("CallFunctionStub_Args%d%s%s", argc_, in_loop_name, flags_name);
}
} } // namespace v8::internal

deps/v8/src/code-stubs.h (173)

@ -70,10 +70,8 @@ namespace internal {
V(NumberToString) \
V(CEntry) \
V(JSEntry) \
V(KeyedLoadFastElement) \
V(KeyedStoreFastElement) \
V(KeyedLoadExternalArray) \
V(KeyedStoreExternalArray) \
V(KeyedLoadElement) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryNegativeLookup)
@ -183,16 +181,15 @@ class CodeStub BASE_EMBEDDED {
}
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey(), false); }
SmartPointer<const char> GetName();
virtual void PrintName(StringStream* stream) {
stream->Add("%s", MajorName(MajorKey(), false));
}
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
#ifdef DEBUG
virtual void Print() { PrintF("%s\n", GetName()); }
#endif
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@ -274,8 +271,6 @@ class StackCheckStub : public CodeStub {
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "StackCheckStub"; }
Major MajorKey() { return StackCheck; }
int MinorKey() { return 0; }
};
@ -290,7 +285,6 @@ class ToNumberStub: public CodeStub {
private:
Major MajorKey() { return ToNumber; }
int MinorKey() { return 0; }
const char* GetName() { return "ToNumberStub"; }
};
@ -302,7 +296,6 @@ class FastNewClosureStub : public CodeStub {
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
int MinorKey() { return strict_mode_; }
@ -323,7 +316,6 @@ class FastNewContextStub : public CodeStub {
private:
int slots_;
const char* GetName() { return "FastNewContextStub"; }
Major MajorKey() { return FastNewContext; }
int MinorKey() { return slots_; }
};
@ -352,7 +344,6 @@ class FastCloneShallowArrayStub : public CodeStub {
Mode mode_;
int length_;
const char* GetName() { return "FastCloneShallowArrayStub"; }
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() {
ASSERT(mode_ == 0 || mode_ == 1);
@ -370,7 +361,7 @@ class InstanceofStub: public CodeStub {
kReturnTrueFalseObject = 1 << 2
};
explicit InstanceofStub(Flags flags) : flags_(flags), name_(NULL) { }
explicit InstanceofStub(Flags flags) : flags_(flags) { }
static Register left();
static Register right();
@ -393,10 +384,9 @@ class InstanceofStub: public CodeStub {
return (flags_ & kReturnTrueFalseObject) != 0;
}
const char* GetName();
virtual void PrintName(StringStream* stream);
Flags flags_;
char* name_;
};
@ -408,8 +398,6 @@ class MathPowStub: public CodeStub {
private:
virtual CodeStub::Major MajorKey() { return MathPow; }
virtual int MinorKey() { return 0; }
const char* GetName() { return "MathPowStub"; }
};
@ -476,8 +464,7 @@ class CompareStub: public CodeStub {
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(lhs),
rhs_(rhs),
name_(NULL) { }
rhs_(rhs) { }
CompareStub(Condition cc,
bool strict,
@ -488,8 +475,7 @@ class CompareStub: public CodeStub {
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(no_reg),
rhs_(no_reg),
name_(NULL) { }
rhs_(no_reg) { }
void Generate(MacroAssembler* masm);
@ -543,26 +529,7 @@ class CompareStub: public CodeStub {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
"(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
MinorKey(),
static_cast<int>(cc_),
strict_ ? "true" : "false",
never_nan_nan_ ? "true" : "false",
include_smi_compare_ ? "inluded" : "not included",
include_number_compare_ ? "included" : "not included");
if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
} else {
PrintF("\n");
}
}
#endif
virtual void PrintName(StringStream* stream);
};
@ -593,8 +560,6 @@ class CEntryStub : public CodeStub {
int MinorKey();
bool NeedsImmovableCode();
const char* GetName() { return "CEntryStub"; }
};
@ -610,8 +575,6 @@ class JSEntryStub : public CodeStub {
private:
Major MajorKey() { return JSEntry; }
int MinorKey() { return 0; }
const char* GetName() { return "JSEntryStub"; }
};
@ -624,7 +587,9 @@ class JSConstructEntryStub : public JSEntryStub {
private:
int MinorKey() { return 1; }
const char* GetName() { return "JSConstructEntryStub"; }
virtual void PrintName(StringStream* stream) {
stream->Add("JSConstructEntryStub");
}
};
@ -651,13 +616,7 @@ class ArgumentsAccessStub: public CodeStub {
void GenerateNewNonStrictFast(MacroAssembler* masm);
void GenerateNewNonStrictSlow(MacroAssembler* masm);
const char* GetName() { return "ArgumentsAccessStub"; }
#ifdef DEBUG
void Print() {
PrintF("ArgumentsAccessStub (type %d)\n", type_);
}
#endif
virtual void PrintName(StringStream* stream);
};
@ -670,14 +629,6 @@ class RegExpExecStub: public CodeStub {
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "RegExpExecStub"; }
#ifdef DEBUG
void Print() {
PrintF("RegExpExecStub\n");
}
#endif
};
@ -690,14 +641,6 @@ class RegExpConstructResultStub: public CodeStub {
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "RegExpConstructResultStub"; }
#ifdef DEBUG
void Print() {
PrintF("RegExpConstructResultStub\n");
}
#endif
};
@ -717,14 +660,7 @@ class CallFunctionStub: public CodeStub {
InLoopFlag in_loop_;
CallFunctionFlags flags_;
#ifdef DEBUG
void Print() {
PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
argc_,
static_cast<int>(in_loop_),
static_cast<int>(flags_));
}
#endif
virtual void PrintName(StringStream* stream);
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
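The BitField helpers pack several small values into the 32-bit minor key. A standalone sketch of the encode/decode contract (the ArgcBits layout below is assumed purely for illustration; only InLoopBits appears in this excerpt):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

enum InLoopFlag { NOT_IN_LOOP = 0, IN_LOOP = 1 };
typedef BitField<InLoopFlag, 0, 1> InLoopBits;  // bit 0
typedef BitField<int, 1, 30> ArgcBits;          // hypothetical: bits 1..30

int main() {
  uint32_t minor_key = InLoopBits::encode(IN_LOOP) | ArgcBits::encode(3);
  assert(InLoopBits::decode(minor_key) == IN_LOOP);
  assert(ArgcBits::decode(minor_key) == 3);
  return 0;
}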
@ -921,83 +857,44 @@ class AllowStubCallsScope {
DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
};
#ifdef DEBUG
#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); }
#else
#define DECLARE_ARRAY_STUB_PRINT(name)
#endif
class KeyedLoadFastElementStub : public CodeStub {
class KeyedLoadElementStub : public CodeStub {
public:
explicit KeyedLoadFastElementStub() {
}
explicit KeyedLoadElementStub(JSObject::ElementsKind elements_kind)
: elements_kind_(elements_kind)
{ }
Major MajorKey() { return KeyedLoadFastElement; }
int MinorKey() { return 0; }
Major MajorKey() { return KeyedLoadElement; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedLoadFastElementStub"; }
private:
JSObject::ElementsKind elements_kind_;
DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub)
DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
};
class KeyedStoreFastElementStub : public CodeStub {
class KeyedStoreElementStub : public CodeStub {
public:
explicit KeyedStoreFastElementStub(bool is_js_array)
: is_js_array_(is_js_array) { }
KeyedStoreElementStub(bool is_js_array,
JSObject::ElementsKind elements_kind)
: is_js_array_(is_js_array),
elements_kind_(elements_kind) { }
Major MajorKey() { return KeyedStoreFastElement; }
int MinorKey() { return is_js_array_ ? 1 : 0; }
Major MajorKey() { return KeyedStoreElement; }
int MinorKey() {
return (is_js_array_ ? 0 : JSObject::kElementsKindCount) + elements_kind_;
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedStoreFastElementStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub)
private:
bool is_js_array_;
};
class KeyedLoadExternalArrayStub : public CodeStub {
public:
explicit KeyedLoadExternalArrayStub(JSObject::ElementsKind elements_kind)
: elements_kind_(elements_kind) { }
Major MajorKey() { return KeyedLoadExternalArray; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedLoadExternalArrayStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub)
protected:
JSObject::ElementsKind elements_kind_;
};
class KeyedStoreExternalArrayStub : public CodeStub {
public:
explicit KeyedStoreExternalArrayStub(JSObject::ElementsKind elements_kind)
: elements_kind_(elements_kind) { }
Major MajorKey() { return KeyedStoreExternalArray; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedStoreExternalArrayStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub)
protected:
JSObject::ElementsKind elements_kind_;
DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
};
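A worked example of the new MinorKey() layout: JS-array stubs occupy keys [0, kElementsKindCount) and non-JS-array stubs the next kElementsKindCount slots, so every (is_js_array, elements_kind) pair keys a distinct stub. A standalone sketch (the kind count of 8 is assumed only for illustration):

#include <cassert>

const int kElementsKindCount = 8;  // assumed value, for illustration only

int MinorKeyFor(bool is_js_array, int elements_kind) {
  return (is_js_array ? 0 : kElementsKindCount) + elements_kind;
}

int main() {
  assert(MinorKeyFor(true, 3) == 3);    // JS-array stubs: keys [0, 8)
  assert(MinorKeyFor(false, 3) == 11);  // non-JS-array stubs: keys [8, 16)
  return 0;
}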

4
deps/v8/src/codegen.cc

@ -169,8 +169,6 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#endif // ENABLE_DISASSEMBLER
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static Vector<const char> kRegexp = CStrVector("regexp");
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
@ -187,8 +185,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
return false;
}
#endif
bool CodeGenerator::RecordPositions(MacroAssembler* masm,
int pos,

53
deps/v8/src/conversions-inl.h

@ -43,6 +43,11 @@
namespace v8 {
namespace internal {
static inline double JunkStringValue() {
return std::numeric_limits<double>::quiet_NaN();
}
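Returning the NaN from an inline function, rather than exposing a static const double in conversions.h (removed further down), avoids a static initializer in every translation unit that includes the header; the value itself is unchanged. A minimal sketch of the idiom:

#include <cassert>
#include <cmath>
#include <limits>

// Computed on use; no per-translation-unit static initialization.
static inline double JunkStringValue() {
  return std::numeric_limits<double>::quiet_NaN();
}

int main() {
  assert(std::isnan(JunkStringValue()));  // unparsable strings map to NaN
  return 0;
}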
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
@ -151,7 +156,7 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
!AdvanceToNonspace(unicode_cache, &current, end)) {
break;
} else {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
}
@ -181,7 +186,7 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
int middle_value = (1 << (overflow_bits_count - 1));
@ -229,7 +234,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
EndMark end,
int radix) {
const bool allow_trailing_junk = true;
const double empty_string_val = JUNK_STRING_VALUE;
const double empty_string_val = JunkStringValue();
if (!AdvanceToNonspace(unicode_cache, &current, end)) {
return empty_string_val;
@ -242,12 +247,12 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
// Ignore leading sign; skip following spaces.
++current;
if (current == end) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
} else if (*current == '-') {
++current;
if (current == end) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
negative = true;
}
@ -260,7 +265,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
if (*current == 'x' || *current == 'X') {
radix = 16;
++current;
if (current == end) return JUNK_STRING_VALUE;
if (current == end) return JunkStringValue();
} else {
radix = 8;
leading_zero = true;
@ -275,14 +280,14 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
if (current == end) return SignedZero(negative);
if (*current == 'x' || *current == 'X') {
++current;
if (current == end) return JUNK_STRING_VALUE;
if (current == end) return JunkStringValue();
} else {
leading_zero = true;
}
}
}
if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
if (radix < 2 || radix > 36) return JunkStringValue();
// Skip leading zeros.
while (*current == '0') {
@ -292,7 +297,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
}
if (!leading_zero && !isDigit(*current, radix)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
if (IsPowerOf2(radix)) {
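For a power-of-two radix every digit contributes exactly log2(radix) bits, which is what lets the code dispatch to InternalStringToIntDouble with the bit width as a template parameter (e.g. <4> for hex, as in the "0x" case further down). A hypothetical standalone sketch of the shift-accumulate idea, ignoring the overflow handling the real routine performs:

#include <cstdint>
#include <cstdio>

static int DigitValue(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  return -1;
}

template <int radix_log_2>
double ParsePow2(const char* s) {
  uint64_t number = 0;
  for (; *s != '\0'; ++s) {
    int d = DigitValue(*s);
    if (d < 0 || d >= (1 << radix_log_2)) break;  // junk ends the number
    number = (number << radix_log_2) | d;         // one shift per digit
  }
  return static_cast<double>(number);
}

int main() {
  std::printf("%.0f\n", ParsePow2<4>("ff"));  // 255: hex, 4 bits per digit
  return 0;
}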
@ -340,7 +345,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
ASSERT(buffer_pos < kBufferSize);
@ -406,7 +411,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
return negative ? -v : v;
@ -456,22 +461,22 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
if (*current == '+') {
// Ignore leading sign.
++current;
if (current == end) return JUNK_STRING_VALUE;
if (current == end) return JunkStringValue();
} else if (*current == '-') {
++current;
if (current == end) return JUNK_STRING_VALUE;
if (current == end) return JunkStringValue();
negative = true;
}
static const char kInfinitySymbol[] = "Infinity";
if (*current == kInfinitySymbol[0]) {
if (!SubStringEquals(&current, end, kInfinitySymbol)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
ASSERT(buffer_pos == 0);
@ -489,7 +494,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
if (current == end || !isDigit(*current, 16)) {
return JUNK_STRING_VALUE; // "0x".
return JunkStringValue(); // "0x".
}
return InternalStringToIntDouble<4>(unicode_cache,
@ -529,13 +534,13 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
}
if (*current == '.') {
if (octal && !allow_trailing_junk) return JUNK_STRING_VALUE;
if (octal && !allow_trailing_junk) return JunkStringValue();
if (octal) goto parsing_done;
++current;
if (current == end) {
if (significant_digits == 0 && !leading_zero) {
return JUNK_STRING_VALUE;
return JunkStringValue();
} else {
goto parsing_done;
}
@ -576,18 +581,18 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
// If exponent < 0 then string was [+-]\.0*...
// If significant_digits != 0 the string is not equal to 0.
// Otherwise there are no digits in the string.
return JUNK_STRING_VALUE;
return JunkStringValue();
}
// Parse exponential part.
if (*current == 'e' || *current == 'E') {
if (octal) return JUNK_STRING_VALUE;
if (octal) return JunkStringValue();
++current;
if (current == end) {
if (allow_trailing_junk) {
goto parsing_done;
} else {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
}
char sign = '+';
@ -598,7 +603,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
if (allow_trailing_junk) {
goto parsing_done;
} else {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
}
}
@ -607,7 +612,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
if (allow_trailing_junk) {
goto parsing_done;
} else {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
}
@ -631,7 +636,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
return JunkStringValue();
}
parsing_done:

20
deps/v8/src/conversions.cc

@ -430,24 +430,4 @@ char* DoubleToRadixCString(double value, int radix) {
return builder.Finalize();
}
static Mutex* dtoa_lock_one = OS::CreateMutex();
static Mutex* dtoa_lock_zero = OS::CreateMutex();
} } // namespace v8::internal
extern "C" {
void ACQUIRE_DTOA_LOCK(int n) {
ASSERT(n == 0 || n == 1);
(n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
}
void FREE_DTOA_LOCK(int n) {
ASSERT(n == 0 || n == 1);
(n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
Unlock();
}
}

2
deps/v8/src/conversions.h

@ -44,8 +44,6 @@ namespace internal {
// we don't need to preserve all the digits.
const int kMaxSignificantDigits = 772;
static const double JUNK_STRING_VALUE =
std::numeric_limits<double>::quiet_NaN();
static bool isDigit(int x, int radix) {
return (x >= '0' && x <= '9' && x < '0' + radix)

4
deps/v8/src/cpu-profiler-inl.h

@ -30,8 +30,6 @@
#include "cpu-profiler.h"
#ifdef ENABLE_LOGGING_AND_PROFILING
#include <new>
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
@ -83,6 +81,4 @@ bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
#endif // V8_CPU_PROFILER_INL_H_

12
deps/v8/src/cpu-profiler.cc

@ -29,8 +29,6 @@
#include "cpu-profiler-inl.h"
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "frames-inl.h"
#include "hashmap.h"
#include "log-inl.h"
@ -574,31 +572,21 @@ void CpuProfiler::StopProcessor() {
logger->logging_nesting_ = saved_logging_nesting_;
}
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
namespace internal {
void CpuProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() == NULL) {
isolate->set_cpu_profiler(new CpuProfiler());
}
#endif
}
void CpuProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() != NULL) {
delete isolate->cpu_profiler();
}
isolate->set_cpu_profiler(NULL);
#endif
}
} } // namespace v8::internal

10
deps/v8/src/cpu-profiler.h

@ -28,8 +28,6 @@
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
@ -206,9 +204,6 @@ class ProfilerEventsProcessor : public Thread {
v8::internal::CpuProfiler::Call; \
} \
} while (false)
#else
#define PROFILE(isolate, Call) LOG(isolate, Call)
#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
@ -221,7 +216,6 @@ class CpuProfiler {
static void Setup();
static void TearDown();
#ifdef ENABLE_LOGGING_AND_PROFILING
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
@ -289,10 +283,6 @@ class CpuProfiler {
bool need_to_stop_sampler_;
Atomic32 is_profiling_;
#else
static INLINE(bool is_profiling(Isolate* isolate)) { return false; }
#endif // ENABLE_LOGGING_AND_PROFILING
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

421
deps/v8/src/d8.cc

@ -41,6 +41,9 @@
#include "natives.h"
#include "platform.h"
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#endif
namespace v8 {
@ -97,6 +100,8 @@ CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
Persistent<Context> Shell::utility_context_;
Persistent<Context> Shell::evaluation_context_;
i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
ShellOptions Shell::options;
bool CounterMap::Match(void* key1, void* key2) {
@ -119,6 +124,7 @@ bool Shell::ExecuteString(Handle<String> source,
bool report_exceptions) {
HandleScope handle_scope;
TryCatch try_catch;
options.script_executed = true;
if (i::FLAG_debugger) {
// When debugging make exceptions appear to be uncaught.
try_catch.SetVerbose(true);
@ -238,7 +244,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
if (raw_length < 0) {
return ThrowException(String::New("Array length must not be negative."));
}
if (raw_length > v8::internal::ExternalArray::kMaxLength) {
if (raw_length > i::ExternalArray::kMaxLength) {
return ThrowException(
String::New("Array length exceeds maximum length."));
}
@ -246,7 +252,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
} else {
return ThrowException(String::New("Array length must be a number."));
}
if (length > static_cast<size_t>(internal::ExternalArray::kMaxLength)) {
if (length > static_cast<size_t>(i::ExternalArray::kMaxLength)) {
return ThrowException(String::New("Array length exceeds maximum length."));
}
void* data = calloc(length, element_size);
@ -540,7 +546,6 @@ void Shell::InstallUtilityScript() {
shell_source_name.length());
Handle<Script> script = Script::Compile(source, name);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
@ -550,6 +555,13 @@ void Shell::InstallUtilityScript() {
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
#endif
}
@ -625,7 +637,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
}
void Shell::Initialize(bool test_shell) {
void Shell::Initialize() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
@ -645,7 +657,7 @@ void Shell::Initialize(bool test_shell) {
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
if (test_shell) return;
if (options.test_shell) return;
Locker lock;
HandleScope scope;
@ -657,26 +669,17 @@ void Shell::Initialize(bool test_shell) {
if (i::FLAG_debugger_agent) {
v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
}
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
#endif
}
void Shell::RenewEvaluationContext() {
Persistent<Context> Shell::CreateEvaluationContext() {
// This needs to be a critical section since context creation is not
// thread-safe.
i::ScopedLock lock(context_mutex_);
// Initialize the global objects
HandleScope scope;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
// (Re-)create the evaluation context
if (!evaluation_context_.IsEmpty()) {
evaluation_context_.Dispose();
}
evaluation_context_ = Context::New(NULL, global_template);
Context::Scope utility_scope(evaluation_context_);
Persistent<Context> context = Context::New(NULL, global_template);
Context::Scope scope(context);
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
@ -688,28 +691,27 @@ void Shell::RenewEvaluationContext() {
}
i::Handle<i::JSArray> arguments_jsarray =
FACTORY->NewJSArrayWithElements(arguments_array);
evaluation_context_->Global()->Set(String::New("arguments"),
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
return context;
}
void Shell::OnExit() {
if (i::FLAG_dump_counters) {
::printf("+----------------------------------------+-------------+\n");
::printf("| Name | Value |\n");
::printf("+----------------------------------------+-------------+\n");
printf("+----------------------------------------+-------------+\n");
printf("| Name | Value |\n");
printf("+----------------------------------------+-------------+\n");
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
Counter* counter = i.CurrentValue();
if (counter->is_histogram()) {
::printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
::printf("| t:%-36s | %11i |\n",
i.CurrentKey(),
counter->sample_total());
printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
} else {
::printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
}
}
::printf("+----------------------------------------+-------------+\n");
printf("+----------------------------------------+-------------+\n");
}
if (counters_file_ != NULL)
delete counters_file_;
@ -717,7 +719,8 @@ void Shell::OnExit() {
static char* ReadChars(const char* name, int* size_out) {
v8::Unlocker unlocker; // Release the V8 lock while reading files.
// Release the V8 lock while reading files.
v8::Unlocker unlocker(Isolate::GetCurrent());
FILE* file = i::OS::FOpen(name, "rb");
if (file == NULL) return NULL;
@ -806,11 +809,6 @@ class ShellThread : public i::Thread {
void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker;
HandleScope scope;
Handle<ObjectTemplate> global_template = Shell::CreateGlobalTemplate();
char* ptr = const_cast<char*>(files_.start());
while ((ptr != NULL) && (*ptr != '\0')) {
// For each newline-separated line.
@ -822,7 +820,10 @@ void ShellThread::Run() {
continue;
}
Persistent<Context> thread_context = Context::New(NULL, global_template);
// Prepare the context for this thread.
Locker locker;
HandleScope scope;
Persistent<Context> thread_context = Shell::CreateEvaluationContext();
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@ -848,153 +849,296 @@ void ShellThread::Run() {
}
}
int Shell::RunMain(int argc, char* argv[], bool* executed) {
// Default use preemption if threads are created.
bool use_preemption = true;
// Default to use lowest possible thread preemption interval to test as many
// edge cases as possible.
int preemption_interval = 1;
void SourceGroup::ExitShell(int exit_code) {
// Use _exit instead of exit to avoid races between isolate
// threads and static destructors.
fflush(stdout);
fflush(stderr);
_exit(exit_code);
}
i::List<i::Thread*> threads(1);
{
// Since the thread below may spawn new threads accessing V8 holding the
// V8 lock here is mandatory.
Locker locker;
RenewEvaluationContext();
Context::Scope context_scope(evaluation_context_);
for (int i = 1; i < argc; i++) {
char* str = argv[i];
if (strcmp(str, "--preemption") == 0) {
use_preemption = true;
} else if (strcmp(str, "--no-preemption") == 0) {
use_preemption = false;
} else if (strcmp(str, "--preemption-interval") == 0) {
if (i + 1 < argc) {
char* end = NULL;
preemption_interval = strtol(argv[++i], &end, 10); // NOLINT
if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) {
printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
return 1;
}
} else {
printf("Missing value for --preemption-interval\n");
return 1;
}
} else if (strcmp(str, "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
void SourceGroup::Execute() {
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
v8::Handle<v8::String> source = v8::String::New(argv[++i]);
(*executed) = true;
if (!ExecuteString(source, file_name, false, true)) {
OnExit();
return 1;
}
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
int size = 0;
const char* files = ReadChars(argv[++i], &size);
if (files == NULL) return 1;
ShellThread* thread =
new ShellThread(threads.length(),
i::Vector<const char>(files, size));
thread->Start();
threads.Add(thread);
(*executed) = true;
HandleScope handle_scope;
Handle<String> file_name = String::New("unnamed");
Handle<String> source = String::New(argv_[i + 1]);
if (!Shell::ExecuteString(source, file_name, false, true)) {
ExitShell(1);
return;
}
++i;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
} else {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope;
Handle<String> file_name = v8::String::New(str);
Handle<String> source = ReadFile(str);
(*executed) = true;
Handle<String> file_name = String::New(arg);
Handle<String> source = ReadFile(arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
printf("Error reading '%s'\n", arg);
ExitShell(1);
return;
}
if (!Shell::ExecuteString(source, file_name, false, true)) {
ExitShell(1);
return;
}
if (!ExecuteString(source, file_name, false, true)) {
OnExit();
return 1;
}
}
}
// Start preemption if threads have been created and preemption is enabled.
if (threads.length() > 0 && use_preemption) {
Locker::StartPreemption(preemption_interval);
Handle<String> SourceGroup::ReadFile(const char* name) {
FILE* file = fopen(name, "rb");
if (file == NULL) return Handle<String>();
fseek(file, 0, SEEK_END);
int size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
for (int i = 0; i < size;) {
int read = fread(&chars[i], 1, size - i, file);
i += read;
}
fclose(file);
Handle<String> result = String::New(chars, size);
delete[] chars;
return result;
}
for (int i = 0; i < threads.length(); i++) {
i::Thread* thread = threads[i];
thread->Join();
delete thread;
i::Thread::Options SourceGroup::GetThreadOptions() {
i::Thread::Options options;
options.name = "IsolateThread";
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code.
options.stack_size = 2 << 20; // 2 Mb seems to be enough
return options;
}
OnExit();
return 0;
void SourceGroup::ExecuteInThread() {
Isolate* isolate = Isolate::New();
do {
if (next_semaphore_ != NULL) next_semaphore_->Wait();
{
Isolate::Scope iscope(isolate);
Locker lock(isolate);
HandleScope scope;
Persistent<Context> context = Shell::CreateEvaluationContext();
{
Context::Scope cscope(context);
Execute();
}
context.Dispose();
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
} while (!Shell::options.last_run);
isolate->Dispose();
}
int Shell::Main(int argc, char* argv[]) {
// Figure out if we're requested to stress the optimization
// infrastructure by running tests multiple times and forcing
// optimization in the last run.
bool FLAG_stress_opt = false;
bool FLAG_stress_deopt = false;
bool FLAG_interactive_shell = false;
bool FLAG_test_shell = false;
bool script_executed = false;
void SourceGroup::StartExecuteInThread() {
if (thread_ == NULL) {
thread_ = new IsolateThread(this);
thread_->Start();
}
next_semaphore_->Signal();
}
void SourceGroup::WaitForThread() {
if (thread_ == NULL) return;
if (Shell::options.last_run) {
thread_->Join();
thread_ = NULL;
} else {
done_semaphore_->Wait();
}
}
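StartExecuteInThread() and WaitForThread() above pair with the ExecuteInThread() loop: the main thread signals next_semaphore_ to release one run, then either waits on done_semaphore_ (intermediate stress runs) or joins the thread (last run), while the isolate thread loops wait-execute-signal until last_run. A standalone sketch of this handshake using std::thread and a small condition-variable semaphore in place of i::Thread/i::Semaphore (not V8 code):

#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> guard(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }
 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

Semaphore next_semaphore, done_semaphore;
std::atomic<bool> last_run(false);

void ExecuteInThread() {                   // the isolate thread's loop
  do {
    next_semaphore.Wait();                 // wait for the go-ahead
    std::puts("executing source group");
    done_semaphore.Signal();               // report the run as done
  } while (!last_run);
}

int main() {
  std::thread isolate_thread(ExecuteInThread);
  const int stress_runs = 3;
  for (int i = 0; i < stress_runs; ++i) {
    last_run = (i == stress_runs - 1);
    next_semaphore.Signal();               // StartExecuteInThread()
    if (!last_run) done_semaphore.Wait();  // WaitForThread(), non-final run
  }
  isolate_thread.join();                   // WaitForThread() joins last run
  return 0;
}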
bool Shell::SetOptions(int argc, char* argv[]) {
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i], "--stress-opt") == 0) {
FLAG_stress_opt = true;
options.stress_opt = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
FLAG_stress_deopt = true;
options.stress_deopt = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--noalways-opt") == 0) {
// No support for stressing if we can't use --always-opt.
FLAG_stress_opt = false;
FLAG_stress_deopt = false;
options.stress_opt = false;
options.stress_deopt = false;
} else if (strcmp(argv[i], "--shell") == 0) {
FLAG_interactive_shell = true;
options.interactive_shell = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--test") == 0) {
FLAG_test_shell = true;
options.test_shell = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--preemption") == 0) {
options.use_preemption = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--no-preemption") == 0) {
options.use_preemption = false;
argv[i] = NULL;
} else if (strcmp(argv[i], "--preemption-interval") == 0) {
if (++i < argc) {
argv[i-1] = NULL;
char* end = NULL;
options.preemption_interval = strtol(argv[i], &end, 10); // NOLINT
if (options.preemption_interval <= 0
|| *end != '\0'
|| errno == ERANGE) {
printf("Invalid value for --preemption-interval '%s'\n", argv[i]);
return false;
}
argv[i] = NULL;
} else {
printf("Missing value for --preemption-interval\n");
return false;
}
} else if (strcmp(argv[i], "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
continue;
} else if (strcmp(argv[i], "--isolate") == 0) {
options.num_isolates++;
}
}
// Run parallel threads if we are not using --isolate
for (int i = 1; i < argc; i++) {
if (argv[i] == NULL) continue;
if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) {
if (options.num_isolates > 1) {
printf("-p is not compatible with --isolate\n");
return false;
}
argv[i] = NULL;
if (options.parallel_files == NULL) {
options.parallel_files = new i::List<i::Vector<const char> >();
}
int size = 0;
const char* files = ReadChars(argv[++i], &size);
if (files == NULL) {
printf("-p option incomplete\n");
return false;
}
argv[i] = NULL;
options.parallel_files->Add(i::Vector<const char>(files, size));
}
}
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
Initialize(FLAG_test_shell);
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
SourceGroup* current = options.isolate_sources;
current->Begin(argv, 1);
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "--isolate") == 0) {
current->End(i);
current++;
current->Begin(argv, i + 1);
} else if (strncmp(argv[i], "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]);
}
}
current->End(argc);
return true;
}
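In effect, each occurrence of --isolate begins a new source group: an invocation such as d8 first.js --isolate second.js (a hypothetical example) would run first.js in the main isolate and second.js concurrently in a second isolate on its own thread, via the StartExecuteInThread() path in RunMain() below.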
int Shell::RunMain(int argc, char* argv[]) {
i::List<i::Thread*> threads(1);
{
if (options.parallel_files != NULL)
for (int i = 0; i < options.parallel_files->length(); i++) {
i::Vector<const char> files = options.parallel_files->at(i);
ShellThread* thread = new ShellThread(threads.length(), files);
thread->Start();
threads.Add(thread);
}
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].StartExecuteInThread();
}
Locker lock;
HandleScope scope;
Persistent<Context> context = CreateEvaluationContext();
{
Context::Scope cscope(context);
options.isolate_sources[0].Execute();
}
if (options.last_run) {
// Keep using the same context in the interactive shell
evaluation_context_ = context;
} else {
context.Dispose();
}
// Start preemption if threads have been created and preemption is enabled.
if (options.parallel_files != NULL
&& threads.length() > 0
&& options.use_preemption) {
Locker::StartPreemption(options.preemption_interval);
}
}
for (int i = 1; i < options.num_isolates; ++i) {
options.isolate_sources[i].WaitForThread();
}
if (options.parallel_files != NULL)
for (int i = 0; i < threads.length(); i++) {
i::Thread* thread = threads[i];
thread->Join();
delete thread;
}
OnExit();
return 0;
}
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
Initialize();
int result = 0;
if (FLAG_stress_opt || FLAG_stress_deopt) {
v8::Testing::SetStressRunType(
FLAG_stress_opt ? v8::Testing::kStressTypeOpt
: v8::Testing::kStressTypeDeopt);
int stress_runs = v8::Testing::GetStressRuns();
if (options.stress_opt || options.stress_deopt) {
Testing::SetStressRunType(
options.stress_opt ? Testing::kStressTypeOpt
: Testing::kStressTypeDeopt);
int stress_runs = Testing::GetStressRuns();
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv, &script_executed);
Testing::PrepareStressRun(i);
options.last_run = (i == stress_runs - 1);
result = RunMain(argc, argv);
}
printf("======== Full Deoptimization =======\n");
v8::Testing::DeoptimizeAll();
Testing::DeoptimizeAll();
} else {
result = RunMain(argc, argv, &script_executed);
result = RunMain(argc, argv);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Run remote debugger if requested, but never on --test
if (i::FLAG_remote_debugger && !FLAG_test_shell) {
if (i::FLAG_remote_debugger && !options.test_shell) {
InstallUtilityScript();
RunRemoteDebugger(i::FLAG_debugger_port);
return 0;
@ -1003,12 +1147,15 @@ int Shell::Main(int argc, char* argv[]) {
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
if ((FLAG_interactive_shell || !script_executed) && !FLAG_test_shell) {
if ((options.interactive_shell || !options.script_executed)
&& !options.test_shell) {
InstallUtilityScript();
RunShell();
}
v8::V8::Dispose();
V8::Dispose();
return result;
}

3
deps/v8/src/d8.gyp

@ -38,10 +38,7 @@
'../src',
],
'defines': [
'ENABLE_LOGGING_AND_PROFILING',
'ENABLE_DEBUGGER_SUPPORT',
'ENABLE_VMSTATE_TRACKING',
'V8_FAST_TLS',
],
'sources': [
'd8.cc',

91
deps/v8/src/d8.h

@ -112,6 +112,87 @@ class CounterMap {
};
class SourceGroup {
public:
SourceGroup()
: next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
thread_(NULL),
argv_(NULL),
begin_offset_(0),
end_offset_(0) { }
void Begin(char** argv, int offset) {
argv_ = const_cast<const char**>(argv);
begin_offset_ = offset;
}
void End(int offset) { end_offset_ = offset; }
void Execute();
void StartExecuteInThread();
void WaitForThread();
private:
class IsolateThread : public i::Thread {
public:
explicit IsolateThread(SourceGroup* group)
: i::Thread(GetThreadOptions()), group_(group) {}
virtual void Run() {
group_->ExecuteInThread();
}
private:
SourceGroup* group_;
};
static i::Thread::Options GetThreadOptions();
void ExecuteInThread();
i::Semaphore* next_semaphore_;
i::Semaphore* done_semaphore_;
i::Thread* thread_;
void ExitShell(int exit_code);
Handle<String> ReadFile(const char* name);
const char** argv_;
int begin_offset_;
int end_offset_;
};
class ShellOptions {
public:
ShellOptions()
: script_executed(false),
last_run(true),
stress_opt(false),
stress_deopt(false),
interactive_shell(false),
test_shell(false),
use_preemption(true),
preemption_interval(10),
num_isolates(1),
isolate_sources(NULL),
parallel_files(NULL) { }
bool script_executed;
bool last_run;
bool stress_opt;
bool stress_deopt;
bool interactive_shell;
bool test_shell;
bool use_preemption;
int preemption_interval;
int num_isolates;
SourceGroup* isolate_sources;
i::List< i::Vector<const char> >* parallel_files;
};
class Shell: public i::AllStatic {
public:
static bool ExecuteString(Handle<String> source,
@ -129,12 +210,13 @@ class Shell: public i::AllStatic {
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(const char* name);
static Handle<String> ReadFile(const char* name);
static void Initialize(bool test_shell);
static void RenewEvaluationContext();
static void Initialize();
static Persistent<Context> CreateEvaluationContext();
static void InstallUtilityScript();
static void RunShell();
static bool SetOptions(int argc, char* argv[]);
static int RunScript(char* filename);
static int RunMain(int argc, char* argv[], bool* executed);
static int RunMain(int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<Array> GetCompletions(Handle<String> text,
@ -205,6 +287,8 @@ class Shell: public i::AllStatic {
static const char* kHistoryFileName;
static const char* kPrompt;
static ShellOptions options;
private:
static Persistent<Context> utility_context_;
static Persistent<Context> evaluation_context_;
@ -214,6 +298,7 @@ class Shell: public i::AllStatic {
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static i::Mutex* context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,

7
deps/v8/src/debug.cc

@ -1821,6 +1821,13 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
// If there are no break points this cannot be a break at return, as
// the debugger statement and stack guard debug break cannot be at
// return.
if (!has_break_points_) {
return false;
}
// Get the executing function in which the debug break occurred.
Handle<SharedFunctionInfo> shared =
Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());

3
deps/v8/src/debug.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -35,6 +35,7 @@
#include "execution.h"
#include "factory.h"
#include "flags.h"
#include "frames-inl.h"
#include "hashmap.h"
#include "platform.h"
#include "string-stream.h"

52
deps/v8/src/deoptimizer.cc

@ -161,8 +161,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Get the "simulated" top and size for the requested frame.
Address top =
reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
unsigned size =
deoptimizer->output_[frame_index]->GetFrameSize() / kPointerSize;
uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
// Done with the GC-unsafe frame descriptions. This re-enables allocation.
deoptimizer->DeleteFrameDescriptions();
@ -547,7 +546,7 @@ void Deoptimizer::MaterializeHeapNumbers() {
#ifdef ENABLE_DEBUGGER_SUPPORT
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address top, intptr_t size, DeoptimizedFrameInfo* info) {
Address top, uint32_t size, DeoptimizedFrameInfo* info) {
ASSERT_EQ(DEBUGGER, bailout_type_);
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
@ -557,17 +556,29 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address slot = d.slot_address();
if (top <= slot && slot < top + size) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
int expression_index = static_cast<int>(
// Calculate the index with the bottom of the expression stack
// at index 0, and the fixed part (including incoming arguments)
// at negative indexes.
int index = static_cast<int>(
info->expression_count_ - (slot - top) / kPointerSize - 1);
if (FLAG_trace_deopt) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for expression stack index %d\n",
"for stack index %d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
expression_index);
index);
}
if (index >= 0) {
info->SetExpression(index, *num);
} else {
// Calculate the parameter index, subtracting one for the receiver.
int parameter_index =
index +
static_cast<int>(size) / kPointerSize -
info->expression_count_ - 1;
info->SetParameter(parameter_index, *num);
}
info->SetExpression(expression_index, *num);
}
}
}
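A worked example of the index arithmetic above, with assumed numbers (a 10-slot frame, kPointerSize of 4, and 3 expression-stack slots); in practice only slots that actually hold deferred heap numbers, i.e. expression-stack and parameter slots, take these paths:

#include <cstdio>

int main() {
  const int kPointerSize = 4;               // assumed, for illustration
  const unsigned size = 10 * kPointerSize;  // assumed frame size
  const int expression_count = 3;           // assumed stack height
  for (unsigned offset = 0; offset < size; offset += kPointerSize) {
    int index =
        expression_count - static_cast<int>(offset / kPointerSize) - 1;
    if (index >= 0) {
      std::printf("slot %u -> expression index %d\n",
                  offset / kPointerSize, index);
    } else {
      // Negative indexes land in the fixed part / incoming arguments;
      // after the shift below, -1 is the receiver.
      int parameter_index =
          index + static_cast<int>(size) / kPointerSize -
          expression_count - 1;
      std::printf("slot %u -> parameter index %d\n",
                  offset / kPointerSize, parameter_index);
    }
  }
  return 0;
}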
@ -1126,6 +1137,22 @@ unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
}
int FrameDescription::ComputeParametersCount() {
return function_->shared()->formal_parameter_count();
}
Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
ASSERT_EQ(Code::FUNCTION, kind_);
ASSERT(index >= 0);
ASSERT(index < ComputeParametersCount());
// The slot indexes for incoming arguments are negative.
unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
index - ComputeParametersCount());
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
ASSERT_EQ(Code::FUNCTION, kind_);
unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
@ -1415,7 +1442,13 @@ void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
DeoptimizedFrameInfo::DeoptimizedFrameInfo(
Deoptimizer* deoptimizer, int frame_index) {
FrameDescription* output_frame = deoptimizer->output_[frame_index];
SetFunction(output_frame->GetFunction());
expression_count_ = output_frame->GetExpressionCount(deoptimizer);
parameters_count_ = output_frame->ComputeParametersCount();
parameters_ = new Object*[parameters_count_];
for (int i = 0; i < parameters_count_; i++) {
SetParameter(i, output_frame->GetParameter(deoptimizer, i));
}
expression_stack_ = new Object*[expression_count_];
for (int i = 0; i < expression_count_; i++) {
SetExpression(i, output_frame->GetExpression(deoptimizer, i));
@ -1424,10 +1457,13 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(
DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
delete expression_stack_;
delete[] expression_stack_;
delete[] parameters_;
}
void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
v->VisitPointer(reinterpret_cast<Object**>(&function_));
v->VisitPointers(parameters_, parameters_ + parameters_count_);
v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
}

36
deps/v8/src/deoptimizer.h

@ -194,7 +194,7 @@ class Deoptimizer : public Malloced {
void MaterializeHeapNumbers();
#ifdef ENABLE_DEBUGGER_SUPPORT
void MaterializeHeapNumbersForDebuggerInspectableFrame(
Address top, intptr_t size, DeoptimizedFrameInfo* info);
Address top, uint32_t size, DeoptimizedFrameInfo* info);
#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@ -400,6 +400,12 @@ class FrameDescription {
void SetKind(Code::Kind kind) { kind_ = kind; }
#endif
// Get the incoming arguments count.
int ComputeParametersCount();
// Get a parameter value for an unoptimized frame.
Object* GetParameter(Deoptimizer* deoptimizer, int index);
// Get the expression stack height for an unoptimized frame.
unsigned GetExpressionCount(Deoptimizer* deoptimizer);
@ -662,9 +668,23 @@ class DeoptimizedFrameInfo : public Malloced {
// GC support.
void Iterate(ObjectVisitor* v);
// Return the number of incoming arguments.
int parameters_count() { return parameters_count_; }
// Return the height of the expression stack.
int expression_count() { return expression_count_; }
// Get the frame function.
JSFunction* GetFunction() {
return function_;
}
// Get an incoming argument.
Object* GetParameter(int index) {
ASSERT(0 <= index && index < parameters_count());
return parameters_[index];
}
// Get an expression from the expression stack.
Object* GetExpression(int index) {
ASSERT(0 <= index && index < expression_count());
@ -672,13 +692,27 @@ class DeoptimizedFrameInfo : public Malloced {
}
private:
// Set the frame function.
void SetFunction(JSFunction* function) {
function_ = function;
}
// Set an incoming argument.
void SetParameter(int index, Object* obj) {
ASSERT(0 <= index && index < parameters_count());
parameters_[index] = obj;
}
// Set an expression on the expression stack.
void SetExpression(int index, Object* obj) {
ASSERT(0 <= index && index < expression_count());
expression_stack_[index] = obj;
}
JSFunction* function_;
int parameters_count_;
int expression_count_;
Object** parameters_;
Object** expression_stack_;
friend class Deoptimizer;

19
deps/v8/src/flag-definitions.h

@ -452,14 +452,10 @@ DEFINE_bool(trace_regexp_assembler,
"trace regexp macro assembler calls.")
//
// Logging and profiling only flags
// Logging and profiling flags
//
#undef FLAG
#ifdef ENABLE_LOGGING_AND_PROFILING
#define FLAG FLAG_FULL
#else
#define FLAG FLAG_READONLY
#endif
// log.cc
DEFINE_bool(log, false,
@ -490,19 +486,6 @@ DEFINE_bool(sliding_state_window, false,
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
//
// Heap protection flags
// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well.
//
#ifdef ENABLE_HEAP_PROTECTION
#undef FLAG
#define FLAG FLAG_FULL
DEFINE_bool(protect_heap, false,
"Protect/unprotect V8's heap when leaving/entring the VM.")
#endif
//
// Disassembler only flags
//

4
deps/v8/src/frames.cc

@ -36,6 +36,8 @@
#include "scopeinfo.h"
#include "string-stream.h"
#include "allocation-inl.h"
namespace v8 {
namespace internal {
@ -346,7 +348,6 @@ void SafeStackFrameIterator::Reset() {
// -------------------------------------------------------------------------
#ifdef ENABLE_LOGGING_AND_PROFILING
SafeStackTraceFrameIterator::SafeStackTraceFrameIterator(
Isolate* isolate,
Address fp, Address sp, Address low_bound, Address high_bound) :
@ -362,7 +363,6 @@ void SafeStackTraceFrameIterator::Advance() {
if (frame()->is_java_script()) return;
}
}
#endif
Code* StackFrame::GetSafepointData(Isolate* isolate,

2
deps/v8/src/frames.h

@ -843,7 +843,6 @@ class SafeStackFrameIterator BASE_EMBEDDED {
};
#ifdef ENABLE_LOGGING_AND_PROFILING
typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator>
SafeJavaScriptFrameIterator;
@ -855,7 +854,6 @@ class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
Address low_bound, Address high_bound);
void Advance();
};
#endif
class StackFrameLocator BASE_EMBEDDED {

2
deps/v8/src/full-codegen.h

@ -444,7 +444,7 @@ class FullCodeGenerator: public AstVisitor {
TypeofState typeof_state,
Label* slow,
Label* done);
void EmitVariableLoad(Variable* expr);
void EmitVariableLoad(VariableProxy* proxy);
enum ResolveEvalFlag {
SKIP_CONTEXT_LOOKUP,

5
deps/v8/src/handles.cc

@ -543,11 +543,6 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
// associated with the wrapper and get rid of both the wrapper and the
// handle.
static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
#ifdef ENABLE_HEAP_PROTECTION
// Weak reference callbacks are called as if from outside V8. We
// need to reenter to unprotect the heap.
VMState state(OTHER);
#endif
Handle<Object> cache = Utils::OpenHandle(*handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();

11
deps/v8/src/heap-profiler.cc

@ -34,7 +34,6 @@ namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
HeapProfiler::HeapProfiler()
: snapshots_(new HeapSnapshotsCollection()),
next_snapshot_uid_(1) {
@ -52,29 +51,21 @@ void HeapProfiler::ResetSnapshots() {
}
#endif // ENABLE_LOGGING_AND_PROFILING
void HeapProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
if (isolate->heap_profiler() == NULL) {
isolate->set_heap_profiler(new HeapProfiler());
}
#endif
}
void HeapProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
Isolate* isolate = Isolate::Current();
delete isolate->heap_profiler();
isolate->set_heap_profiler(NULL);
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control) {
@ -179,7 +170,5 @@ void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
snapshots_->ObjectMoveEvent(from, to);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

8
deps/v8/src/heap-profiler.h

@ -33,8 +33,6 @@
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
class HeapSnapshot;
class HeapSnapshotsCollection;
@ -45,9 +43,6 @@ class HeapSnapshotsCollection;
profiler->call; \
} \
} while (false)
#else
#define HEAP_PROFILE(heap, call) ((void) 0)
#endif // ENABLE_LOGGING_AND_PROFILING
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
@ -56,7 +51,6 @@ class HeapProfiler {
static void Setup();
static void TearDown();
#ifdef ENABLE_LOGGING_AND_PROFILING
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control);
@ -93,8 +87,6 @@ class HeapProfiler {
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
#endif // ENABLE_LOGGING_AND_PROFILING
};
} } // namespace v8::internal

74
deps/v8/src/heap.cc

@ -293,12 +293,11 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
// following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
// --log-gc is set. The following logic is used to avoid
// double logging.
#ifdef DEBUG
if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
if (FLAG_heap_stats) {
ReportHeapStatistics("Before GC");
@ -306,23 +305,16 @@ void Heap::ReportStatisticsBeforeGC() {
new_space_.ReportStatistics();
}
if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
if (FLAG_heap_stats) {
new_space_.CollectStatistics();
ReportHeapStatistics("Before GC");
new_space_.ClearHistograms();
}
#elif defined(ENABLE_LOGGING_AND_PROFILING)
#else
if (FLAG_log_gc) {
new_space_.CollectStatistics();
new_space_.ReportStatistics();
new_space_.ClearHistograms();
}
#endif
#endif // DEBUG
}
#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
@ -368,7 +360,6 @@ void Heap::PrintShortHeapStatistics() {
lo_space_->Size(),
lo_space_->Available());
}
#endif
// TODO(1238405): Combine the infrastructure for --heap-stats and
@ -376,20 +367,17 @@ void Heap::PrintShortHeapStatistics() {
void Heap::ReportStatisticsAfterGC() {
// Similar to the before GC, we use some complicated logic to ensure that
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
#if defined(DEBUG)
if (FLAG_heap_stats) {
new_space_.CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
new_space_.ReportStatistics();
}
#elif defined(DEBUG)
if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
#else
if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
#endif // DEBUG
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::GarbageCollectionPrologue() {
@ -406,11 +394,11 @@ void Heap::GarbageCollectionPrologue() {
}
if (FLAG_gc_verbose) Print();
#endif
#endif // DEBUG
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
#if defined(DEBUG)
ReportStatisticsBeforeGC();
#endif
#endif // DEBUG
LiveObjectList::GCPrologue();
}
@ -447,12 +435,10 @@ void Heap::GarbageCollectionEpilogue() {
symbol_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
#if defined(DEBUG)
ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
#endif // DEBUG
isolate_->debug()->AfterGarbageCollection();
#endif
}
@ -1335,15 +1321,12 @@ class ScavengingVisitor : public StaticVisitorBase {
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
enum SizeRestriction { SMALL, UNKNOWN_SIZE };
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
should_record = should_record || FLAG_log_gc;
#endif
if (should_record) {
if (heap->new_space()->Contains(obj)) {
heap->new_space()->RecordAllocation(obj);
@ -1352,7 +1335,6 @@ class ScavengingVisitor : public StaticVisitorBase {
}
}
}
#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
@ -1368,12 +1350,9 @@ class ScavengingVisitor : public StaticVisitorBase {
source->set_map_word(MapWord::FromForwardingAddress(target));
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
#endif
HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
Isolate* isolate = heap->isolate();
if (isolate->logger()->is_logging() ||
CpuProfiler::is_profiling(isolate)) {
@ -1382,7 +1361,6 @@ class ScavengingVisitor : public StaticVisitorBase {
source->address(), target->address()));
}
}
#endif
}
return target;
@ -1558,7 +1536,6 @@ static void InitializeScavengingVisitorsTables() {
void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
// Table was already updated by some isolate.
return;
@ -1584,7 +1561,6 @@ void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
Release_Store(&scavenging_visitors_table_mode_,
LOGGING_AND_PROFILING_ENABLED);
}
#endif
}
@ -5213,28 +5189,6 @@ void Heap::Shrink() {
}
#ifdef ENABLE_HEAP_PROTECTION
void Heap::Protect() {
if (HasBeenSetup()) {
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next())
space->Protect();
}
}
void Heap::Unprotect() {
if (HasBeenSetup()) {
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next())
space->Unprotect();
}
}
#endif
void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
ASSERT(callback != NULL);
GCPrologueCallbackPair pair(callback, gc_type);
@ -5930,9 +5884,7 @@ GCTracer::~GCTracer() {
PrintF("\n");
}
#if defined(ENABLE_LOGGING_AND_PROFILING)
heap_->PrintShortHeapStatistics();
#endif
}

10
deps/v8/src/heap.h

@ -409,12 +409,6 @@ class Heap {
// Uncommit unused semi space.
bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the heap by marking all spaces read-only/writable.
void Protect();
void Unprotect();
#endif
// Allocates and initializes a new JavaScript object based on a
// constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@ -1052,10 +1046,8 @@ class Heap {
void ZapFromSpace();
#endif
#if defined(ENABLE_LOGGING_AND_PROFILING)
// Print short heap statistics.
void PrintShortHeapStatistics();
#endif
// Makes a new symbol object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@ -1514,11 +1506,9 @@ class Heap {
// around a GC).
inline void CompletelyClearInstanceofCache();
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
#endif
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);

15
deps/v8/src/hydrogen-instructions.cc

@ -784,6 +784,21 @@ void HChange::PrintDataTo(StringStream* stream) {
}
HValue* HCheckInstanceType::Canonicalize() {
if (check_ == IS_STRING &&
!value()->type().IsUninitialized() &&
value()->type().IsString()) {
return NULL;
}
if (check_ == IS_SYMBOL &&
value()->IsConstant() &&
HConstant::cast(value())->handle()->IsSymbol()) {
return NULL;
}
return this;
}
void HCheckInstanceType::GetCheckInterval(InstanceType* first,
InstanceType* last) {
ASSERT(is_interval_check());

14
deps/v8/src/hydrogen-instructions.h

@ -2003,14 +2003,7 @@ class HCheckInstanceType: public HUnaryOperation {
virtual void Verify();
#endif
virtual HValue* Canonicalize() {
if (!value()->type().IsUninitialized() &&
value()->type().IsString() &&
check_ == IS_STRING) {
return NULL;
}
return this;
}
virtual HValue* Canonicalize();
bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
void GetCheckInterval(InstanceType* first, InstanceType* last);
@ -3362,8 +3355,9 @@ class HLoadContextSlot: public HUnaryOperation {
static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
return !value->type().IsSmi() &&
!(value->IsConstant() && HConstant::cast(value)->InOldSpace());
return !value->type().IsBoolean()
&& !value->type().IsSmi()
&& !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
}

74
deps/v8/src/ia32/code-stubs-ia32.cc

@ -511,25 +511,17 @@ static void IntegerConvert(MacroAssembler* masm,
}
const char* UnaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"UnaryOpStub_%s_%s_%s",
stream->Add("UnaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
UnaryOpIC::GetName(operand_type_));
return name_;
}
@ -914,12 +906,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) {
}
const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@ -928,13 +915,10 @@ const char* BinaryOpStub::GetName() {
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"BinaryOpStub_%s_%s_%s",
stream->Add("BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(operands_type_));
return name_;
}
@ -4380,9 +4364,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
#ifdef ENABLE_LOGGING_AND_PROFILING
Label not_outermost_js, not_outermost_js_2;
#endif
// Setup frame.
__ push(ebp);
@ -4401,7 +4383,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
__ push(Operand::StaticVariable(c_entry_fp));
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
masm->isolate());
@ -4414,7 +4395,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&not_outermost_js);
__ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
#endif
// Call a faked try-block that does the invoke.
__ call(&invoke);
@ -4462,7 +4442,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PopTryHandler();
__ bind(&exit);
#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
__ cmp(Operand(ebx),
@ -4470,7 +4449,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
// Restore the top frame descriptor from the stack.
__ pop(Operand::StaticVariable(ExternalReference(
@ -4732,15 +4710,8 @@ int CompareStub::MinorKey() {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
void CompareStub::PrintName(StringStream* stream) {
ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
switch (cc_) {
case less: cc_name = "LT"; break;
@ -4751,35 +4722,12 @@ const char* CompareStub::GetName() {
case not_equal: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
const char* strict_name = "";
if (strict_ && (cc_ == equal || cc_ == not_equal)) {
strict_name = "_STRICT";
}
const char* never_nan_nan_name = "";
if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
never_nan_nan_name = "_NO_NAN";
}
const char* include_number_compare_name = "";
if (!include_number_compare_) {
include_number_compare_name = "_NO_NUMBER";
}
const char* include_smi_compare_name = "";
if (!include_smi_compare_) {
include_smi_compare_name = "_NO_SMI";
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"CompareStub_%s%s%s%s%s",
cc_name,
strict_name,
never_nan_nan_name,
include_number_compare_name,
include_smi_compare_name);
return name_;
bool is_equality = cc_ == equal || cc_ == not_equal;
stream->Add("CompareStub_%s", cc_name);
if (strict_ && is_equality) stream->Add("_STRICT");
if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
if (!include_number_compare_) stream->Add("_NO_NUMBER");
if (!include_smi_compare_) stream->Add("_NO_SMI");
}
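Each of the three GetName() methods deleted above allocated a 100-byte auto-deleted array through the bootstrapper and formatted into it with OS::SNPrintF; the PrintName(StringStream*) replacements only stream the name. A minimal sketch of the shared helper this refactoring enables, assuming the StringStream and NoAllocationStringAllocator utilities from src/string-stream.h (the actual helper lives in code-stubs.cc, outside this hunk):
// Sketch: format a stub name on demand, with no per-stub allocation.
SmartPointer<const char> CodeStub::GetName() {
  char buffer[100];
  NoAllocationStringAllocator allocator(
      buffer, static_cast<unsigned>(sizeof(buffer)));
  StringStream stream(&allocator);
  PrintName(&stream);  // Virtual; e.g. UnaryOpStub streams
                       // "UnaryOpStub_%s_%s_%s" as shown above.
  return stream.ToCString();
}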

53
deps/v8/src/ia32/code-stubs-ia32.h

@ -67,8 +67,7 @@ class UnaryOpStub: public CodeStub {
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(operand_type),
name_(NULL) {
operand_type_(operand_type) {
}
private:
@ -78,19 +77,7 @@ class UnaryOpStub: public CodeStub {
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
UnaryOpIC::GetName(operand_type_));
}
#endif
virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@ -153,8 +140,7 @@ class BinaryOpStub: public CodeStub {
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
result_type_(BinaryOpIC::UNINITIALIZED) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -167,8 +153,7 @@ class BinaryOpStub: public CodeStub {
mode_(ModeBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) { }
result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@ -184,20 +169,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
BinaryOpIC::GetName(operands_type_));
}
#endif
virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@ -415,14 +387,6 @@ class NumberToStringStub: public CodeStub {
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
@ -466,13 +430,6 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
#ifdef DEBUG
void Print() {
PrintF("StringDictionaryLookupStub\n");
}
#endif
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {

2
deps/v8/src/ia32/codegen-ia32.h

@ -53,9 +53,7 @@ class CodeGenerator {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static bool RecordPositions(MacroAssembler* masm,
int pos,

18
deps/v8/src/ia32/full-codegen-ia32.cc

@ -744,7 +744,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(eax);
@ -1064,7 +1064,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var());
EmitVariableLoad(expr);
}
@ -1214,7 +1214,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
}
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Record position before possible IC call.
SetSourcePosition(proxy->position());
Variable* var = proxy->var();
// Three cases: non-this global variables, lookup slots, and all other
// types of slots.
Slot* slot = var->AsSlot();
@ -1540,7 +1544,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@ -1769,7 +1773,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ mov(edx, eax);
__ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
@ -2701,13 +2705,11 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
__ mov(eax, isolate()->factory()->undefined_value());
context()->Plug(eax);
@ -3768,7 +3770,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {

106
deps/v8/src/ia32/ic-ia32.cc

@ -216,105 +216,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
//
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
//
// result - holds the result on exit if the load succeeds and we fall through.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
__ mov(r1, r0);
__ not_(r0);
__ shl(r1, 15);
__ add(r0, Operand(r1));
// hash = hash ^ (hash >> 12);
__ mov(r1, r0);
__ shr(r1, 12);
__ xor_(r0, Operand(r1));
// hash = hash + (hash << 2);
__ lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
__ mov(r1, r0);
__ shr(r1, 4);
__ xor_(r0, Operand(r1));
// hash = hash * 2057;
__ imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
__ mov(r1, r0);
__ shr(r1, 16);
__ xor_(r0, Operand(r1));
// Compute capacity mask.
__ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int
__ dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
__ mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
__ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
}
__ and_(r2, Operand(r1));
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
__ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
__ cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
__ j(equal, &done);
} else {
__ j(not_equal, miss);
}
}
__ bind(&done);
// Check that the value is a normal property.
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
__ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@ -591,8 +492,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
GenerateNumberDictionaryLoad(masm,
&slow_pop_receiver,
__ LoadFromNumberDictionary(&slow_pop_receiver,
ecx,
eax,
ebx,
@ -1200,8 +1100,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ SmiUntag(ebx);
// ebx: untagged index
// Receiver in edx will be clobbered, need to reload it on miss.
GenerateNumberDictionaryLoad(
masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ LoadFromNumberDictionary(
&slow_reload_receiver, eax, ecx, ebx, edx, edi, edi);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1);
__ jmp(&do_call);

1
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -1345,6 +1345,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}

7
deps/v8/src/ia32/lithium-gap-resolver-ia32.cc

@ -305,8 +305,13 @@ void LGapResolver::EmitMove(int index) {
} else if (source->IsConstantOperand()) {
ASSERT(destination->IsRegister() || destination->IsStackSlot());
Immediate src = cgen_->ToImmediate(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ Set(dst, src);
} else {
Operand dst = cgen_->ToOperand(destination);
__ mov(dst, src);
__ Set(dst, src);
}
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
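The functional change in this hunk is the memory-destination branch: it now goes through MacroAssembler::Set, like the register branch above it, instead of a raw mov. For register destinations, Set picks the shorter xor encoding when the immediate is zero; a sketch of the idea, assuming Immediate::is_zero() from assembler-ia32.h (the Operand overload can only forward to mov, since xor-with-self needs a register):
// Zero gets the 2-byte xor encoding instead of a 5-byte mov.
void MacroAssembler::Set(Register dst, const Immediate& x) {
  if (x.is_zero()) {
    xor_(dst, Operand(dst));
  } else {
    mov(dst, x);
  }
}
void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
  mov(dst, x);
}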

98
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -734,6 +734,104 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
//
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
//
// result - holds the result on exit if the load succeeds and we fall through.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
mov(r1, r0);
not_(r0);
shl(r1, 15);
add(r0, Operand(r1));
// hash = hash ^ (hash >> 12);
mov(r1, r0);
shr(r1, 12);
xor_(r0, Operand(r1));
// hash = hash + (hash << 2);
lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
mov(r1, r0);
shr(r1, 4);
xor_(r0, Operand(r1));
// hash = hash * 2057;
imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
mov(r1, r0);
shr(r1, 16);
xor_(r0, Operand(r1));
// Compute capacity mask.
mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
shr(r1, kSmiTagSize); // convert smi to int
dec(r1);
// Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
}
and_(r2, Operand(r1));
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
j(equal, &done);
} else {
j(not_equal, miss);
}
}
bind(&done);
// Check that the value is a normal property.
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
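For reference, the step-by-step comments above restate cleanly in C++; per the comment, this is the sequence that must stay in sync with ComputeIntegerHash in src/utils.h:
// C++ equivalent of the hash sequence the macro emits.
static inline uint32_t ComputeIntegerHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}
The unrolled loop then probes entries at (hash + i + i * i) & (capacity - 1) for i = 0..3, each index scaled by NumberDictionary::kEntrySize == 3 pointer-sized slots (key, value, details); a mismatch on the final probe jumps to the miss label rather than looping further.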
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {

9
deps/v8/src/ia32/macro-assembler-ia32.h

@ -352,6 +352,15 @@ class MacroAssembler: public Assembler {
Label* miss);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2,
Register result);
// ---------------------------------------------------------------------------
// Allocation support

3
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -28,6 +28,9 @@
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#include "ia32/assembler-ia32.h"
#include "ia32/assembler-ia32-inl.h"
namespace v8 {
namespace internal {

69
deps/v8/src/ia32/stub-cache-ia32.cc

@ -2679,7 +2679,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// -- esp[0] : return address
// -----------------------------------
Code* stub;
MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
@ -3137,7 +3140,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// -- esp[0] : return address
// -----------------------------------
Code* stub;
MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(edx,
Handle<Map>(receiver_map),
@ -3321,6 +3325,64 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
#define __ ACCESS_MASM(masm)
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label slow, miss_force_generic;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
__ JumpIfNotSmi(eax, &miss_force_generic);
__ mov(ebx, eax);
__ SmiUntag(ebx);
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Push receiver on the stack to free up a register for the dictionary
// probing.
__ push(edx);
__ LoadFromNumberDictionary(&slow,
ecx,
eax,
ebx,
edx,
edi,
eax);
// Pop receiver before returning.
__ pop(edx);
__ ret(0);
__ bind(&slow);
__ pop(edx);
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ jmp(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Handle<Code> miss_force_generic_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET);
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
JSObject::ElementsKind elements_kind) {
@ -3731,7 +3793,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
// -- eax : key
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------

41
deps/v8/src/ic.cc

@ -1097,15 +1097,10 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
}
MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck(
bool is_js_array) {
return KeyedLoadFastElementStub().TryGetCode();
}
MaybeObject* KeyedLoadIC::GetExternalArrayStubWithoutMapCheck(
MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
bool is_js_array,
JSObject::ElementsKind elements_kind) {
return KeyedLoadExternalArrayStub(elements_kind).TryGetCode();
return KeyedLoadElementStub(elements_kind).TryGetCode();
}
@ -1675,7 +1670,7 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
for (int i = 0; i < target_receiver_maps.length(); ++i) {
Map* receiver_map(target_receiver_maps.at(i));
MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
receiver_map, strict_mode, generic_stub);
receiver_map, strict_mode);
Code* cached_stub;
if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
handler_ics.Add(cached_stub);
@ -1694,18 +1689,18 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
StrictModeFlag strict_mode,
Code* generic_stub) {
StrictModeFlag strict_mode) {
if ((receiver_map->instance_type() & kNotStringTag) == 0) {
ASSERT(string_stub() != NULL);
return string_stub();
} else if (receiver_map->has_external_array_elements()) {
return GetExternalArrayStubWithoutMapCheck(receiver_map->elements_kind());
} else if (receiver_map->has_fast_elements()) {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
return GetFastElementStubWithoutMapCheck(is_js_array);
} else {
return generic_stub;
ASSERT(receiver_map->has_dictionary_elements() ||
receiver_map->has_fast_elements() ||
receiver_map->has_fast_double_elements() ||
receiver_map->has_external_array_elements());
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
return GetElementStubWithoutMapCheck(is_js_array,
receiver_map->elements_kind());
}
}
@ -1717,6 +1712,7 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
Code* result = NULL;
if (receiver->HasFastElements() ||
receiver->HasExternalArrayElements() ||
receiver->HasFastDoubleElements() ||
receiver->HasDictionaryElements()) {
MaybeObject* maybe_stub =
isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
@ -1729,15 +1725,10 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
}
MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck(
bool is_js_array) {
return KeyedStoreFastElementStub(is_js_array).TryGetCode();
}
MaybeObject* KeyedStoreIC::GetExternalArrayStubWithoutMapCheck(
MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
bool is_js_array,
JSObject::ElementsKind elements_kind) {
return KeyedStoreExternalArrayStub(elements_kind).TryGetCode();
return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
}

21
deps/v8/src/ic.h

@ -345,10 +345,8 @@ class KeyedIC: public IC {
explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
virtual ~KeyedIC() {}
virtual MaybeObject* GetFastElementStubWithoutMapCheck(
bool is_js_array) = 0;
virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
virtual MaybeObject* GetElementStubWithoutMapCheck(
bool is_js_array,
JSObject::ElementsKind elements_kind) = 0;
protected:
@ -373,8 +371,7 @@ class KeyedIC: public IC {
MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
Map* receiver_map,
StrictModeFlag strict_mode,
Code* generic_stub);
StrictModeFlag strict_mode);
MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
bool is_store,
@ -415,10 +412,8 @@ class KeyedLoadIC: public KeyedIC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
virtual MaybeObject* GetFastElementStubWithoutMapCheck(
bool is_js_array);
virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
virtual MaybeObject* GetElementStubWithoutMapCheck(
bool is_js_array,
JSObject::ElementsKind elements_kind);
protected:
@ -568,10 +563,8 @@ class KeyedStoreIC: public KeyedIC {
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
static void GenerateNonStrictArguments(MacroAssembler* masm);
virtual MaybeObject* GetFastElementStubWithoutMapCheck(
bool is_js_array);
virtual MaybeObject* GetExternalArrayStubWithoutMapCheck(
virtual MaybeObject* GetElementStubWithoutMapCheck(
bool is_js_array,
JSObject::ElementsKind elements_kind);
protected:

9
deps/v8/src/isolate.cc

@ -85,13 +85,9 @@ void ThreadLocalTop::InitializeInternal() {
#ifdef USE_SIMULATOR
simulator_ = NULL;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = NULL;
external_callback_ = NULL;
#endif
#ifdef ENABLE_VMSTATE_TRACKING
current_vm_state_ = EXTERNAL;
#endif
try_catch_handler_address_ = NULL;
context_ = NULL;
thread_id_ = ThreadId::Invalid();
@ -1279,11 +1275,9 @@ Handle<Context> Isolate::GetCallingGlobalContext() {
char* Isolate::ArchiveThread(char* to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
RuntimeProfiler::IsolateExitedJS(this);
}
#endif
memcpy(to, reinterpret_cast<char*>(thread_local_top()),
sizeof(ThreadLocalTop));
InitializeThreadLocal();
@ -1303,12 +1297,10 @@ char* Isolate::RestoreThread(char* from) {
thread_local_top()->simulator_ = Simulator::current(this);
#endif
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
RuntimeProfiler::IsolateEnteredJS(this);
}
ASSERT(context() == NULL || context()->IsContext());
#endif
return from + sizeof(ThreadLocalTop);
}
@ -1627,7 +1619,6 @@ bool Isolate::PreInit() {
#define C(name) isolate_addresses_[Isolate::k_##name] = \
reinterpret_cast<Address>(name());
ISOLATE_ADDRESS_LIST(C)
ISOLATE_ADDRESS_LIST_PROF(C)
#undef C
string_tracker_ = new StringTracker();

41
deps/v8/src/isolate.h

@ -125,14 +125,8 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
C(c_entry_fp_address) \
C(context_address) \
C(pending_exception_address) \
C(external_caught_exception_address)
#ifdef ENABLE_LOGGING_AND_PROFILING
#define ISOLATE_ADDRESS_LIST_PROF(C) \
C(external_caught_exception_address) \
C(js_entry_sp_address)
#else
#define ISOLATE_ADDRESS_LIST_PROF(C)
#endif
// Platform-independent, reliable thread identifier.
@ -252,14 +246,9 @@ class ThreadLocalTop BASE_EMBEDDED {
#endif
#endif // USE_SIMULATOR
#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
Address external_callback_; // the external callback we're currently in
#endif
#ifdef ENABLE_VMSTATE_TRACKING
StateTag current_vm_state_;
#endif
// Generated code scratch locations.
int32_t formal_count_;
@ -313,18 +302,6 @@ class HashMap;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
#define ISOLATE_LOGGING_INIT_LIST(V) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL)
#else
#define ISOLATE_LOGGING_INIT_LIST(V)
#endif
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* SerializerDeserializer state. */ \
V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \
@ -373,8 +350,9 @@ typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
/* SafeStackFrameIterator activations count. */ \
V(int, safe_stack_iterator_counter, 0) \
V(uint64_t, enabled_cpu_features, 0) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL) \
ISOLATE_PLATFORM_INIT_LIST(V) \
ISOLATE_LOGGING_INIT_LIST(V) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
@ -445,7 +423,6 @@ class Isolate {
enum AddressId {
#define C(name) k_##name,
ISOLATE_ADDRESS_LIST(C)
ISOLATE_ADDRESS_LIST_PROF(C)
#undef C
k_isolate_address_count
};
@ -620,7 +597,6 @@ class Isolate {
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
#ifdef ENABLE_LOGGING_AND_PROFILING
// Bottom JS entry (see StackTracer::Trace in log.cc).
static Address js_entry_sp(ThreadLocalTop* thread) {
return thread->js_entry_sp_;
@ -628,7 +604,6 @@ class Isolate {
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
}
#endif
// Generated code scratch locations.
void* formal_count_address() { return &thread_local_top_.formal_count_; }
@ -945,16 +920,13 @@ class Isolate {
static const int kJSRegexpStaticOffsetsVectorSize = 50;
#ifdef ENABLE_LOGGING_AND_PROFILING
Address external_callback() {
return thread_local_top_.external_callback_;
}
void set_external_callback(Address callback) {
thread_local_top_.external_callback_ = callback;
}
#endif
#ifdef ENABLE_VMSTATE_TRACKING
StateTag current_vm_state() {
return thread_local_top_.current_vm_state_;
}
@ -980,7 +952,6 @@ class Isolate {
}
thread_local_top_.current_vm_state_ = state;
}
#endif
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
@ -1356,10 +1327,4 @@ inline void Context::mark_out_of_memory() {
} } // namespace v8::internal
// TODO(isolates): Get rid of these -inl.h includes and place them only where
// they're needed.
#include "allocation-inl.h"
#include "zone-inl.h"
#include "frames-inl.h"
#endif // V8_ISOLATE_H_

1
deps/v8/src/jsregexp.cc

@ -491,6 +491,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
bool is_ascii = subject->IsAsciiRepresentation();
EnsureCompiledIrregexp(regexp, is_ascii);
Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,

1
deps/v8/src/jsregexp.h

@ -29,7 +29,6 @@
#define V8_JSREGEXP_H_
#include "allocation.h"
#include "macro-assembler.h"
#include "zone-inl.h"
namespace v8 {

2
deps/v8/src/lithium-allocator.cc

@ -1024,7 +1024,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
operand = chunk_->DefineConstantOperand(constant);
} else {
ASSERT(!op->EmitAtUses());
LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY);
unalloc->set_virtual_register(op->id());
operand = unalloc;
}

4
deps/v8/src/log-inl.h

@ -34,8 +34,6 @@
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
Script* script) {
if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
@ -51,8 +49,6 @@ Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
}
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

160
deps/v8/src/log-utils.cc

@ -33,101 +33,14 @@
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
LogDynamicBuffer::LogDynamicBuffer(
int block_size, int max_size, const char* seal, int seal_size)
: block_size_(block_size),
max_size_(max_size - (max_size % block_size_)),
seal_(seal),
seal_size_(seal_size),
blocks_(max_size_ / block_size_ + 1),
write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) {
ASSERT(BlocksCount() > 0);
AllocateBlock(0);
for (int i = 1; i < BlocksCount(); ++i) {
blocks_[i] = NULL;
}
}
LogDynamicBuffer::~LogDynamicBuffer() {
for (int i = 0; i < BlocksCount(); ++i) {
DeleteArray(blocks_[i]);
}
}
int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) {
if (buf_size == 0) return 0;
int read_pos = from_pos;
int block_read_index = BlockIndex(from_pos);
int block_read_pos = PosInBlock(from_pos);
int dest_buf_pos = 0;
// Read until dest_buf is filled, or write_pos_ encountered.
while (read_pos < write_pos_ && dest_buf_pos < buf_size) {
const int read_size = Min(write_pos_ - read_pos,
Min(buf_size - dest_buf_pos, block_size_ - block_read_pos));
memcpy(dest_buf + dest_buf_pos,
blocks_[block_read_index] + block_read_pos, read_size);
block_read_pos += read_size;
dest_buf_pos += read_size;
read_pos += read_size;
if (block_read_pos == block_size_) {
block_read_pos = 0;
++block_read_index;
}
}
return dest_buf_pos;
}
int LogDynamicBuffer::Seal() {
WriteInternal(seal_, seal_size_);
is_sealed_ = true;
return 0;
}
int LogDynamicBuffer::Write(const char* data, int data_size) {
if (is_sealed_) {
return 0;
}
if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) {
return WriteInternal(data, data_size);
} else {
return Seal();
}
}
const char* Log::kLogToTemporaryFile = "&";
int LogDynamicBuffer::WriteInternal(const char* data, int data_size) {
int data_pos = 0;
while (data_pos < data_size) {
const int write_size =
Min(data_size - data_pos, block_size_ - block_write_pos_);
memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos,
write_size);
block_write_pos_ += write_size;
data_pos += write_size;
if (block_write_pos_ == block_size_) {
block_write_pos_ = 0;
AllocateBlock(++block_index_);
}
}
write_pos_ += data_size;
return data_size;
}
// Must be the same message as in Logger::PauseProfiler.
const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
Log::Log(Logger* logger)
: write_to_file_(false),
is_stopped_(false),
: is_stopped_(false),
output_handle_(NULL),
ll_output_handle_(NULL),
output_buffer_(NULL),
mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
@ -142,7 +55,6 @@ static void AddIsolateIdIfNeeded(StringStream* stream) {
void Log::Initialize() {
#ifdef ENABLE_LOGGING_AND_PROFILING
mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
@ -166,18 +78,18 @@ void Log::Initialize() {
FLAG_prof_auto = false;
}
bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
|| FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
bool open_log_file = start_logging || FLAG_prof_lazy;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
if (strcmp(FLAG_logfile, "-") == 0) {
OpenStdout();
} else if (strcmp(FLAG_logfile, "*") == 0) {
OpenMemoryBuffer();
// Does nothing for now. Will be removed.
} else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) {
OpenTemporaryFile();
} else {
if (strchr(FLAG_logfile, '%') != NULL ||
!Isolate::Current()->IsDefaultIsolate()) {
@ -222,14 +134,18 @@ void Log::Initialize() {
}
}
}
#endif
}
void Log::OpenStdout() {
ASSERT(!IsEnabled());
output_handle_ = stdout;
write_to_file_ = true;
}
void Log::OpenTemporaryFile() {
ASSERT(!IsEnabled());
output_handle_ = i::OS::OpenTemporaryFile();
}
@ -244,7 +160,6 @@ static const int kLowLevelLogBufferSize = 2 * MB;
void Log::OpenFile(const char* name) {
ASSERT(!IsEnabled());
output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
write_to_file_ = true;
if (FLAG_ll_prof) {
// Open the low-level log file.
size_t len = strlen(name);
@ -257,25 +172,18 @@ void Log::OpenFile(const char* name) {
}
void Log::OpenMemoryBuffer() {
ASSERT(!IsEnabled());
output_buffer_ = new LogDynamicBuffer(
kDynamicBufferBlockSize, kMaxDynamicBufferSize,
kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
write_to_file_ = false;
FILE* Log::Close() {
FILE* result = NULL;
if (output_handle_ != NULL) {
if (strcmp(FLAG_logfile, kLogToTemporaryFile) != 0) {
fclose(output_handle_);
} else {
result = output_handle_;
}
}
void Log::Close() {
if (write_to_file_) {
if (output_handle_ != NULL) fclose(output_handle_);
output_handle_ = NULL;
if (ll_output_handle_ != NULL) fclose(ll_output_handle_);
ll_output_handle_ = NULL;
} else {
delete output_buffer_;
output_buffer_ = NULL;
}
DeleteArray(message_buffer_);
message_buffer_ = NULL;
@ -284,27 +192,7 @@ void Log::Close() {
mutex_ = NULL;
is_stopped_ = false;
}
int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
if (write_to_file_) return 0;
ASSERT(output_buffer_ != NULL);
ASSERT(from_pos >= 0);
ASSERT(max_size >= 0);
int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size);
ASSERT(actual_size <= max_size);
if (actual_size == 0) return 0;
// Find previous log line boundary.
char* end_pos = dest_buf + actual_size - 1;
while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
actual_size = static_cast<int>(end_pos - dest_buf + 1);
// If the assertion below is hit, it means that there was no line end
// found --- something wrong has happened.
ASSERT(actual_size > 0);
ASSERT(actual_size <= max_size);
return actual_size;
return result;
}
@ -413,9 +301,7 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) {
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = log_->write_to_file_ ?
log_->WriteToFile(log_->message_buffer_, pos_) :
log_->WriteToMemory(log_->message_buffer_, pos_);
const int written = log_->WriteToFile(log_->message_buffer_, pos_);
if (written != pos_) {
log_->stop();
log_->logger_->LogFailure();
@ -423,6 +309,4 @@ void LogMessageBuilder::WriteToLogFile() {
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

105
deps/v8/src/log-utils.h

@ -33,69 +33,11 @@
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
class Logger;
// A memory buffer that increments its size as you write in it. Size
// is incremented in 'block_size' steps, never exceeding 'max_size'.
// During growth, memory contents are never copied. At the end of the
// buffer an amount of memory specified in 'seal_size' is reserved.
// When the write position reaches max_size - seal_size, the buffer
// auto-seals itself with 'seal' and allows no further writes. The data
// pointed to by 'seal' must remain available for the entire
// LogDynamicBuffer lifetime.
//
// An instance of this class is created dynamically by Log.
class LogDynamicBuffer {
public:
LogDynamicBuffer(
int block_size, int max_size, const char* seal, int seal_size);
~LogDynamicBuffer();
// Reads the contents of the buffer starting from 'from_pos'. Upon
// return, 'dest_buf' is filled with the data; the actual amount of
// data read is returned, and it is <= 'buf_size'.
int Read(int from_pos, char* dest_buf, int buf_size);
// Writes 'data' to the buffer, making it larger if necessary. If
// data is too big to fit in the buffer, it doesn't get written at
// all. In that case, the buffer auto-seals itself and stops accepting
// any incoming writes. Returns the amount of data written (either
// 'data_size', or 0 if 'data' was too big).
int Write(const char* data, int data_size);
private:
void AllocateBlock(int index) {
blocks_[index] = NewArray<char>(block_size_);
}
int BlockIndex(int pos) const { return pos / block_size_; }
int BlocksCount() const { return BlockIndex(max_size_) + 1; }
int PosInBlock(int pos) const { return pos % block_size_; }
int Seal();
int WriteInternal(const char* data, int data_size);
const int block_size_;
const int max_size_;
const char* seal_;
const int seal_size_;
ScopedVector<char*> blocks_;
int write_pos_;
int block_index_;
int block_write_pos_;
bool is_sealed_;
};
// Functions and data for performing output of log messages.
class Log {
public:
// Performs process-wide initialization.
void Initialize();
@ -103,18 +45,21 @@ class Log {
void stop() { is_stopped_ = true; }
// Frees all resources acquired in Initialize and Open... functions.
void Close();
// See description in include/v8.h.
int GetLogLines(int from_pos, char* dest_buf, int max_size);
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
FILE* Close();
// Returns whether logging is enabled.
bool IsEnabled() {
return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL);
return !is_stopped_ && output_handle_ != NULL;
}
// Size of buffer used for formatting log messages.
static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer;
static const int kMessageBufferSize = 2048;
// This mode is only used in tests, as temporary files are automatically
// deleted on close and thus can't be accessed afterwards.
static const char* kLogToTemporaryFile;
private:
explicit Log(Logger* logger);
@ -125,8 +70,8 @@ class Log {
// Opens file for logging.
void OpenFile(const char* name);
// Opens memory buffer for logging.
void OpenMemoryBuffer();
// Opens a temporary file for logging.
void OpenTemporaryFile();
// Implementation of writing to a log file.
int WriteToFile(const char* msg, int length) {
@ -138,38 +83,16 @@ class Log {
return length;
}
// Implementation of writing to a memory buffer.
int WriteToMemory(const char* msg, int length) {
ASSERT(output_buffer_ != NULL);
return output_buffer_->Write(msg, length);
}
bool write_to_file_;
// Whether logging is stopped (e.g. due to insufficient resources).
bool is_stopped_;
// When logging is active, either output_handle_ or output_buffer_ is used
// to store a pointer to log destination. If logging was opened via OpenStdout
// or OpenFile, then output_handle_ is used. If logging was opened
// via OpenMemoryBuffer, then output_buffer_ is used.
// mutex_ should be acquired before using output_handle_ or output_buffer_.
// When logging is active output_handle_ is used to store a pointer to log
// destination. mutex_ should be acquired before using output_handle_.
FILE* output_handle_;
// Used when low-level profiling is active.
FILE* ll_output_handle_;
LogDynamicBuffer* output_buffer_;
// Size of dynamic buffer block (and dynamic buffer initial size).
static const int kDynamicBufferBlockSize = 65536;
// Maximum size of dynamic buffer.
static const int kMaxDynamicBufferSize = 50 * 1024 * 1024;
// Message to "seal" dynamic buffer with.
static const char* const kDynamicBufferSeal;
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
Mutex* mutex_;
@ -224,8 +147,6 @@ class LogMessageBuilder BASE_EMBEDDED {
int pos_;
};
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
#endif // V8_LOG_UTILS_H_
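With the dynamic memory buffer gone, log output always goes to a FILE*. Tests that used to read the log back via GetLogLines are expected to switch to the temporary-file mode: pass Log::kLogToTemporaryFile ("&") as the logfile name and collect the still-open stream from Logger::TearDown(). A hedged sketch of that flow, using only names that appear in this diff (the surrounding test-harness plumbing is assumed):
// Assumed test-side usage of the new temporary-file log mode.
i::FLAG_log = true;
i::FLAG_logfile = i::Log::kLogToTemporaryFile;  // "&"
LOGGER->Setup();
// ... exercise code that emits log events ...
FILE* log_file = LOGGER->TearDown();  // Returned still open; caller owns it.
ASSERT(log_file != NULL);
rewind(log_file);
char line[2048];
while (fgets(line, sizeof(line), log_file) != NULL) {
  // Scan individual log lines.
}
fclose(log_file);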

132
deps/v8/src/log.cc

@ -43,8 +43,6 @@
namespace v8 {
namespace internal {
#ifdef ENABLE_LOGGING_AND_PROFILING
//
// Sliding state window. Updates counters to keep track of the last
// window of kBufferSize states. This is useful to track where we
@ -554,71 +552,54 @@ void Logger::ProfilerBeginEvent() {
msg.WriteToLogFile();
}
#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::StringEvent(const char* name, const char* value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedStringEvent(name, value);
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
LogMessageBuilder msg(this);
msg.Append("%s,\"%s\"\n", name, value);
msg.WriteToLogFile();
}
#endif
void Logger::IntEvent(const char* name, int value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedIntEvent(name, value);
#endif
}
void Logger::IntPtrTEvent(const char* name, intptr_t value) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log) UncheckedIntPtrTEvent(name, value);
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntEvent(const char* name, int value) {
if (!log_->IsEnabled()) return;
LogMessageBuilder msg(this);
msg.Append("%s,%d\n", name, value);
msg.WriteToLogFile();
}
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
LogMessageBuilder msg(this);
msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
msg.WriteToLogFile();
}
#endif
void Logger::HandleEvent(const char* name, Object** location) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_handles) return;
LogMessageBuilder msg(this);
msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
msg.WriteToLogFile();
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// ApiEvent is private so all the calls come from the Logger class. It is the
// caller's responsibility to ensure that log is enabled and that
// FLAG_log_api is true.
@ -631,11 +612,9 @@ void Logger::ApiEvent(const char* format, ...) {
va_end(ap);
msg.WriteToLogFile();
}
#endif
void Logger::ApiNamedSecurityCheck(Object* key) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
if (key->IsString()) {
SmartPointer<char> str =
@ -646,14 +625,12 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
} else {
ApiEvent("api,check-security,['no-name']\n");
}
#endif
}
void Logger::SharedLibraryEvent(const char* library_path,
uintptr_t start,
uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg(this);
msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
@ -661,14 +638,12 @@ void Logger::SharedLibraryEvent(const char* library_path,
start,
end);
msg.WriteToLogFile();
#endif
}
void Logger::SharedLibraryEvent(const wchar_t* library_path,
uintptr_t start,
uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg(this);
msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
@ -676,11 +651,9 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
start,
end);
msg.WriteToLogFile();
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
@ -721,23 +694,19 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
msg.WriteToLogFile();
}
#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_regexp) return;
LogMessageBuilder msg(this);
msg.Append("regexp-compile,");
LogRegExpSource(regexp);
msg.Append(in_cache ? ",hit\n" : ",miss\n");
msg.WriteToLogFile();
#endif
}
void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
HandleScope scope;
LogMessageBuilder msg(this);
@ -778,22 +747,18 @@ void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::ApiIndexedSecurityCheck(uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,check-security,%u\n", index);
#endif
}
void Logger::ApiNamedPropertyAccess(const char* tag,
JSObject* holder,
Object* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(name->IsString());
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
@ -802,58 +767,47 @@ void Logger::ApiNamedPropertyAccess(const char* tag,
SmartPointer<char> property_name =
String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
#endif
}
void Logger::ApiIndexedPropertyAccess(const char* tag,
JSObject* holder,
uint32_t index) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = holder->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
#endif
}
void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
String* class_name_obj = object->class_name();
SmartPointer<char> class_name =
class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
#endif
}
void Logger::ApiEntryCall(const char* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_api) return;
ApiEvent("api,%s\n", name);
#endif
}
void Logger::NewEvent(const char* name, void* object, size_t size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
static_cast<unsigned int>(size));
msg.WriteToLogFile();
#endif
}
void Logger::DeleteEvent(const char* name, void* object) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
msg.WriteToLogFile();
#endif
}
@ -866,7 +820,6 @@ void Logger::DeleteEventStatic(const char* name, void* object) {
LOGGER->DeleteEvent(name, object);
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
@ -879,43 +832,35 @@ void Logger::CallbackEventInternal(const char* prefix, const char* name,
msg.Append('\n');
msg.WriteToLogFile();
}
#endif
void Logger::CallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("", *str, entry_point);
#endif
}
void Logger::GetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("get ", *str, entry_point);
#endif
}
void Logger::SetterCallbackEvent(String* name, Address entry_point) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_code) return;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CallbackEventInternal("set ", *str, entry_point);
#endif
}
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@ -945,14 +890,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@ -977,11 +920,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// ComputeMarker must only be used when SharedFunctionInfo is known.
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
@ -990,14 +931,12 @@ static const char* ComputeMarker(Code* code) {
default: return "";
}
}
#endif
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@ -1029,7 +968,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
@ -1040,7 +978,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@ -1078,12 +1015,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append(",%s", ComputeMarker(code));
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@ -1106,21 +1041,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::CodeMovingGCEvent() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag));
OS::SignalCodeMovingGC();
#endif
}
void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof || Serializer::enabled()) {
name_buffer_->Reset();
@ -1145,36 +1076,30 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
msg.Append('\"');
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
address_to_name_map_->Move(from, to);
}
MoveEventInternal(CODE_MOVE_EVENT, from, to);
#endif
}
void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
address_to_name_map_->Remove(from);
}
DeleteEventInternal(CODE_DELETE_EVENT, from);
#endif
}
void Logger::SnapshotPositionEvent(Address addr, int pos) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos);
if (Serializer::enabled() && address_to_name_map_ != NULL) {
@ -1196,18 +1121,14 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
msg.Append(",%d", pos);
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
@ -1220,10 +1141,8 @@ void Logger::MoveEventInternal(LogEventsAndTags event,
msg.Append('\n');
msg.WriteToLogFile();
}
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg(this);
@ -1232,11 +1151,9 @@ void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
msg.Append('\n');
msg.WriteToLogFile();
}
#endif
void Logger::ResourceEvent(const char* name, const char* tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("%s,%s,", name, tag);
@ -1249,12 +1166,10 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::SuspectReadEvent(String* name, Object* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
LogMessageBuilder msg(this);
String* class_name = obj->IsJSObject()
@ -1268,12 +1183,10 @@ void Logger::SuspectReadEvent(String* name, Object* obj) {
msg.Append('"');
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg(this);
// Using non-relative system time in order to be able to synchronize with
@ -1281,42 +1194,34 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
space, kind, OS::TimeCurrentMillis());
msg.WriteToLogFile();
#endif
}
void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg(this);
msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
msg.WriteToLogFile();
#endif
}
void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log_gc) return;
LogMessageBuilder msg(this);
msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
msg.WriteToLogFile();
#endif
}
void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
LogMessageBuilder msg(this);
msg.Append("debug-tag,%s\n", call_site_tag);
msg.WriteToLogFile();
#endif
}
void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_log) return;
StringBuilder s(parameter.length() + 1);
for (int i = 0; i < parameter.length(); ++i) {
@ -1330,11 +1235,9 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
parameter_string);
DeleteArray(parameter_string);
msg.WriteToLogFile();
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!log_->IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg(this);
@ -1378,7 +1281,6 @@ void Logger::PauseProfiler() {
ticker_->Stop();
}
FLAG_log_code = false;
// Must be the same message as Log::kDynamicBufferSeal.
LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
}
--logging_nesting_;
@ -1420,11 +1322,6 @@ bool Logger::IsProfilerSamplerActive() {
}
int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
return log_->GetLogLines(from_pos, dest_buf, max_size);
}
class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
public:
EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
@ -1545,7 +1442,6 @@ void Logger::LogCodeObject(Object* object) {
void Logger::LogCodeInfo() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!log_->IsEnabled() || !FLAG_ll_prof) return;
#if V8_TARGET_ARCH_IA32
const char arch[] = "ia32";
@ -1557,7 +1453,6 @@ void Logger::LogCodeInfo() {
const char arch[] = "unknown";
#endif
LowLevelLogWriteBytes(arch, sizeof(arch));
#endif // ENABLE_LOGGING_AND_PROFILING
}
@ -1710,11 +1605,8 @@ void Logger::LogAccessorCallbacks() {
}
}
#endif
bool Logger::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
is_initialized_ = true;
@ -1766,40 +1658,27 @@ bool Logger::Setup() {
}
return true;
#else
return false;
#endif
}
Sampler* Logger::sampler() {
#ifdef ENABLE_LOGGING_AND_PROFILING
return ticker_;
#else
return NULL;
#endif
}
void Logger::EnsureTickerStarted() {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(ticker_ != NULL);
if (!ticker_->IsActive()) ticker_->Start();
#endif
}
void Logger::EnsureTickerStopped() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
#endif
}
void Logger::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!is_initialized_) return;
FILE* Logger::TearDown() {
if (!is_initialized_) return NULL;
is_initialized_ = false;
// Stop the profiler before closing the file.
@ -1815,13 +1694,11 @@ void Logger::TearDown() {
delete ticker_;
ticker_ = NULL;
log_->Close();
#endif
return log_->Close();
}
void Logger::EnableSlidingStateWindow() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// If the ticker is NULL, Logger::Setup has not been called yet. In
// that case, we set the sliding_state_window flag so that the
// sliding window computation will be started when Logger::Setup is
@ -1835,7 +1712,6 @@ void Logger::EnableSlidingStateWindow() {
if (sliding_state_window_ == NULL) {
sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
}
#endif
}
@ -1855,10 +1731,8 @@ bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
#ifdef ENABLE_LOGGING_AND_PROFILING
bool* flag = reinterpret_cast<bool*>(flag_ptr);
*flag |= sampler->IsProfiling();
#endif
}
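
Note on the TearDown() change above: Logger::TearDown() now returns the log's FILE* instead of closing it internally, so an embedder using a temporary log file can drain the buffer itself. A minimal caller sketch, assuming a hypothetical `logger` pointer and that NULL comes back when there is nothing to hand over:

    // Hypothetical embedder-side cleanup; identifiers are illustrative.
    FILE* log_stream = logger->TearDown();
    if (log_stream != NULL) {
      rewind(log_stream);  // stream is handed back open; rewind and drain it
      char buf[4096];
      size_t n;
      while ((n = fread(buf, 1, sizeof(buf), log_stream)) > 0) {
        fwrite(buf, 1, n, stderr);
      }
      fclose(log_stream);  // ownership has passed to the caller
    }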

16
deps/v8/src/log.h

@ -78,7 +78,6 @@ class SlidingStateWindow;
class Ticker;
#undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING
#define LOG(isolate, Call) \
do { \
v8::internal::Logger* logger = \
@ -86,9 +85,6 @@ class Ticker;
if (logger->is_logging()) \
logger->Call; \
} while (false)
#else
#define LOG(isolate, Call) ((void) 0)
#endif
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation") \
@ -161,7 +157,9 @@ class Logger {
Sampler* sampler();
// Frees resources acquired in Setup.
void TearDown();
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
FILE* TearDown();
// Enable the computation of a sliding window of states.
void EnableSlidingStateWindow();
@ -272,7 +270,6 @@ class Logger {
// Log an event reported from generated code
void LogRuntime(Vector<const char> format, JSArray* args);
#ifdef ENABLE_LOGGING_AND_PROFILING
bool is_logging() {
return logging_nesting_ > 0;
}
@ -284,10 +281,6 @@ class Logger {
void ResumeProfiler();
bool IsProfilerPaused();
// If logging is performed into a memory buffer, allows to
// retrieve previously written messages. See v8.h.
int GetLogLines(int from_pos, char* dest_buf, int max_size);
// Logs all compiled functions found in the heap.
void LogCompiledFunctions();
// Logs all accessor callbacks found in the heap.
@ -424,9 +417,6 @@ class Logger {
Address prev_code_;
friend class CpuProfiler;
#else
bool is_logging() { return false; }
#endif
};
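
With the #else fallback removed, the LOG macro is now unconditionally the do { ... } while (false) form shown above. That wrapper makes the expansion a single statement, so call sites compose safely with unbraced control flow. A contrived sketch (tag and arguments illustrative):

    // Safe even without braces, because LOG expands to one statement.
    if (FLAG_log_code)
      LOG(isolate, CodeCreateEvent(Logger::FUNCTION_TAG, code, "example"));
    else
      return;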

7
deps/v8/src/mark-compact.cc

@ -1661,7 +1661,9 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// Clear dead prototype transitions.
int number_of_transitions = map->NumberOfProtoTransitions();
FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
if (number_of_transitions > 0) {
FixedArray* prototype_transitions =
map->unchecked_prototype_transitions();
int new_number_of_transitions = 0;
const int header = Map::kProtoTransitionHeaderSize;
const int proto_offset =
@ -1687,6 +1689,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
}
new_number_of_transitions++;
}
}
// Fill slots that became free with undefined value.
Object* undefined = heap()->raw_unchecked_undefined_value();
@ -3255,11 +3258,9 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
}
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
PROFILE(isolate, CodeDeleteEvent(obj->address()));
}
#endif
}

1
deps/v8/src/messages.js

@ -195,6 +195,7 @@ function FormatMessage(message) {
non_extensible_proto: ["%0", " is not extensible"],
handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
handler_failed: ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
proxy_prop_not_configurable: ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
proxy_non_object_prop_names: ["Trap ", "%1", " returned non-object ", "%0"],
proxy_repeated_prop_name: ["Trap ", "%1", " returned repeated property name ", "%2"],

9
deps/v8/src/mips/assembler-mips.h

@ -779,8 +779,13 @@ class Assembler : public AssemblerBase {
void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
}
// Check the number of instructions generated from label to here.
int InstructionsGeneratedSince(Label* label) {
return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Class for scoping postponing the trampoline pool generation.
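
The new SizeOfCodeGeneratedSince()/InstructionsGeneratedSince() split pairs with the CallSize() predictions introduced in macro-assembler-mips.cc later in this diff; the emit-and-verify pattern looks roughly like this (sketch only):

    // Predict the call sequence size, emit it, then assert the two agree
    // (mirrors the ASSERT_EQ checks added in macro-assembler-mips.cc).
    Label start;
    masm->bind(&start);
    masm->Call(t9);  // jalr t9 plus a delay-slot nop under PROTECT
    ASSERT_EQ(masm->CallSize(t9), masm->SizeOfCodeGeneratedSince(&start));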

5
deps/v8/src/mips/builtins-mips.cc

@ -634,7 +634,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
__ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(Operand(t9));
__ Jump(t9);
// a0: number of arguments
// a1: called object
@ -1075,8 +1075,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
__ Call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
__ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(a0);
__ InvokeFunction(a1, actual, CALL_FUNCTION,

102
deps/v8/src/mips/code-stubs-mips.cc

@ -305,12 +305,6 @@ class ConvertToDoubleStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "ConvertToDoubleStub"; }
#ifdef DEBUG
void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};
@ -396,11 +390,11 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
__ mov(scratch1, a0);
ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
__ push(ra);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub1.GetCode());
// Write Smi from a1 to a1 and a0 in double format.
__ mov(scratch1, a1);
ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub2.GetCode());
__ pop(ra);
}
}
@ -482,7 +476,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ mov(scratch1, object);
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(ra);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub.GetCode());
__ pop(ra);
}
@ -1107,7 +1101,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ mov(t6, rhs);
ConvertToDoubleStub stub1(a1, a0, t6, t5);
__ push(ra);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub1.GetCode());
__ pop(ra);
}
@ -1142,7 +1136,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ mov(t6, lhs);
ConvertToDoubleStub stub2(a3, a2, t6, t5);
__ push(ra);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ Call(stub2.GetCode());
__ pop(ra);
// Load rhs to a double in a1, a0.
if (rhs.is(a0)) {
@ -1803,25 +1797,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
}
const char* UnaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"UnaryOpStub_%s_%s_%s",
stream->Add("UnaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
UnaryOpIC::GetName(operand_type_));
return name_;
}
@ -2160,12 +2146,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) {
}
const char* BinaryOpStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@ -2174,13 +2155,10 @@ const char* BinaryOpStub::GetName() {
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"BinaryOpStub_%s_%s_%s",
stream->Add("BinaryOpStub_%s_%s_%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(operands_type_));
return name_;
}
@ -3749,7 +3727,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// 4 args slots
// args
#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
@ -3766,7 +3743,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
__ push(t0);
#endif
// Call a faked try-block that does the invoke.
__ bal(&invoke); // bal exposes branch delay slot.
@ -3835,7 +3811,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ PopTryHandler();
__ bind(&exit); // v0 holds result
#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(t1);
@ -3844,7 +3819,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ sw(zero_reg, MemOperand(t1));
__ bind(&non_outermost_js_2);
#endif
// Restore the top frame descriptors from the stack.
__ pop(t1);
@ -4592,10 +4566,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it
// contains the hole.
__ GetObjectType(t9, a0, a0);
__ Branch(&runtime, ne, a0, Operand(CODE_TYPE));
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(t9, &runtime);
// a3: encoding of subject string (1 if ASCII, 0 if two_byte);
// t9: code
@ -4947,16 +4920,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* CompareStub::GetName() {
void CompareStub::PrintName(StringStream* stream) {
ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
(lhs_.is(a1) && rhs_.is(a0)));
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* cc_name;
switch (cc_) {
case lt: cc_name = "LT"; break;
@ -4967,40 +4933,14 @@ const char* CompareStub::GetName() {
case ne: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1";
const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1";
const char* strict_name = "";
if (strict_ && (cc_ == eq || cc_ == ne)) {
strict_name = "_STRICT";
}
const char* never_nan_nan_name = "";
if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
never_nan_nan_name = "_NO_NAN";
}
const char* include_number_compare_name = "";
if (!include_number_compare_) {
include_number_compare_name = "_NO_NUMBER";
}
const char* include_smi_compare_name = "";
if (!include_smi_compare_) {
include_smi_compare_name = "_NO_SMI";
}
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"CompareStub_%s%s%s%s%s%s",
cc_name,
lhs_name,
rhs_name,
strict_name,
never_nan_nan_name,
include_number_compare_name,
include_smi_compare_name);
return name_;
bool is_equality = cc_ == eq || cc_ == ne;
stream->Add("CompareStub_%s", cc_name);
stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
if (strict_ && is_equality) stream->Add("_STRICT");
if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
if (!include_number_compare_) stream->Add("_NO_NUMBER");
if (!include_smi_compare_) stream->Add("_NO_SMI");
}
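
The GetName() bodies above, which cached a bootstrapper-allocated char buffer per stub (and could return "OOM"), are replaced by PrintName(StringStream*), which streams the name on demand. A hedged sketch of materializing a name under the new scheme, assuming the StringStream/HeapStringAllocator API from string-stream.h:

    // Illustrative only: build a stub name on demand instead of caching it.
    HeapStringAllocator allocator;
    StringStream stream(&allocator);
    stub->PrintName(&stream);  // e.g. "CompareStub_EQ_a0_a1_STRICT"
    SmartPointer<const char> name(stream.ToCString());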

63
deps/v8/src/mips/code-stubs-mips.h

@ -66,8 +66,7 @@ class UnaryOpStub: public CodeStub {
UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
operand_type_(operand_type),
name_(NULL) {
operand_type_(operand_type) {
}
private:
@ -77,19 +76,7 @@ class UnaryOpStub: public CodeStub {
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
UnaryOpIC::GetName(operand_type_));
}
#endif
virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@ -143,8 +130,7 @@ class BinaryOpStub: public CodeStub {
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
result_type_(BinaryOpIC::UNINITIALIZED) {
use_fpu_ = CpuFeatures::IsSupported(FPU);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -157,8 +143,7 @@ class BinaryOpStub: public CodeStub {
mode_(ModeBits::decode(key)),
use_fpu_(FPUBits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) { }
result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@ -174,20 +159,7 @@ class BinaryOpStub: public CodeStub {
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
BinaryOpIC::GetName(operands_type_));
}
#endif
virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@ -374,12 +346,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
@ -406,14 +372,6 @@ class NumberToStringStub: public CodeStub {
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
@ -431,8 +389,6 @@ class RegExpCEntryStub: public CodeStub {
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "RegExpCEntryStub"; }
};
// Trampoline stub to call into native code. To call safely into native code
@ -453,8 +409,6 @@ class DirectCEntryStub: public CodeStub {
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "DirectCEntryStub"; }
};
class FloatingPointHelper : public AllStatic {
@ -636,13 +590,6 @@ class StringDictionaryLookupStub: public CodeStub {
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
#ifdef DEBUG
void Print() {
PrintF("StringDictionaryLookupStub\n");
}
#endif
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {

2
deps/v8/src/mips/codegen-mips.h

@ -60,9 +60,7 @@ class CodeGenerator: public AstVisitor {
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,

67
deps/v8/src/mips/full-codegen-mips.cc

@ -783,7 +783,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(result_register());
@ -798,7 +798,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
__ CallWithAstId(ic);
__ Call(ic);
// Value in v0 is ignored (declarations are statements).
}
}
@ -873,7 +873,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, clause->CompareId());
__ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ Branch(&next_test, ne, v0, Operand(zero_reg));
@ -1117,7 +1117,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr->var());
EmitVariableLoad(expr);
}
@ -1173,7 +1173,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ CallWithAstId(ic, mode);
__ Call(ic, mode);
}
@ -1253,7 +1253,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
__ li(a0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ Branch(done);
}
}
@ -1262,7 +1262,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
}
void FullCodeGenerator::EmitVariableLoad(Variable* var) {
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Record position before possible IC call.
SetSourcePosition(proxy->position());
Variable* var = proxy->var();
// Three cases: non-this global variables, lookup slots, and all other
// types of slots.
Slot* slot = var->AsSlot();
@ -1275,7 +1279,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) {
__ lw(a0, GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(v0);
} else if (slot->type() == Slot::LOOKUP) {
@ -1421,7 +1425,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, key->id());
__ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@ -1598,7 +1602,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy()->var());
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@ -1665,7 +1669,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(a2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name a0 and a2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@ -1674,7 +1678,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
__ mov(a0, result_register());
// Call keyed load IC. It has arguments key and receiver in a0 and a1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@ -1702,7 +1706,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
__ jmp(&done);
@ -1785,7 +1789,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
context()->Plug(v0);
}
@ -1826,7 +1830,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
__ CallWithAstId(ic);
__ Call(ic);
break;
}
case KEYED_PROPERTY: {
@ -1839,7 +1843,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
__ CallWithAstId(ic);
__ Call(ic);
break;
}
}
@ -1864,7 +1868,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT);
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@ -1962,7 +1966,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2014,7 +2018,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@ -2067,7 +2071,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr,
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
__ CallWithAstId(ic, mode, expr->id());
__ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2101,7 +2105,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@ -2301,7 +2305,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ lw(a1, GlobalObjectOperand());
__ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
__ Push(v0, a1); // Function, receiver.
@ -2780,13 +2784,12 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
#endif
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
context()->Plug(v0);
@ -3664,7 +3667,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
__ CallWithAstId(ic, mode, expr->id());
__ Call(ic, mode, expr->id());
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@ -3807,7 +3810,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(v0);
}
@ -3839,7 +3842,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@ -3918,7 +3921,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
__ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
patch_site.EmitPatchInfo();
__ bind(&done);
@ -3951,7 +3954,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -3969,7 +3972,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@ -3993,7 +3996,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ CallWithAstId(ic);
__ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(v0);
} else if (proxy != NULL &&
@ -4190,7 +4193,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
__ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id());
__ Call(ic, RelocInfo::CODE_TARGET, expr->id());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
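
Every CallWithAstId site above folds into the unified MacroAssembler::Call overload declared later in this diff; the AST id parameter defaults to kNoASTId, so both spellings go through one entry point:

    // Both forms now hit the same Call(Handle<Code>, ...) overload.
    __ Call(ic);                                      // no AST id recorded
    __ Call(ic, RelocInfo::CODE_TARGET, expr->id());  // records the AST id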

116
deps/v8/src/mips/ic-mips.cc

@ -214,115 +214,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'result'.
// Unchanged on bailout so 'key' or 'result' can be used
// in further computation.
//
// Scratch registers:
//
// reg0 - holds the untagged key on entry and holds the hash once computed.
//
// reg1 - Used to hold the capacity mask of the dictionary.
//
// reg2 - Used for the index into the dictionary.
// at - Temporary (avoid MacroAssembler instructions also using 'at').
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
__ nor(reg1, reg0, zero_reg);
__ sll(at, reg0, 15);
__ addu(reg0, reg1, at);
// hash = hash ^ (hash >> 12);
__ srl(at, reg0, 12);
__ xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
__ sll(at, reg0, 2);
__ addu(reg0, reg0, at);
// hash = hash ^ (hash >> 4);
__ srl(at, reg0, 4);
__ xor_(reg0, reg0, at);
// hash = hash * 2057;
__ li(reg1, Operand(2057));
__ mul(reg0, reg0, reg1);
// hash = hash ^ (hash >> 16);
__ srl(at, reg0, 16);
__ xor_(reg0, reg0, at);
// Compute the capacity mask.
__ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
__ sra(reg1, reg1, kSmiTagSize);
__ Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
__ mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
__ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
}
__ and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
ASSERT(NumberDictionary::kEntrySize == 3);
__ sll(at, reg2, 1); // 2x.
__ addu(reg2, reg2, at); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
__ sll(at, reg2, kPointerSizeLog2);
__ addu(reg2, elements, at);
__ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
if (i != kProbes - 1) {
__ Branch(&done, eq, key, Operand(at));
} else {
__ Branch(miss, ne, key, Operand(at));
}
}
__ bind(&done);
// Check that the value is a normal property.
// reg2: elements + (index * kPointerSize).
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
__ lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
__ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
__ Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
__ lw(result, FieldMemOperand(reg2, kValueOffset));
}
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a2 : name
@ -751,7 +642,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ Branch(&slow_load, ne, a3, Operand(at));
__ sra(a0, a2, kSmiTagSize);
// a0: untagged index
GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1);
__ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
__ jmp(&do_call);
@ -963,6 +854,9 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
__ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
DONT_DO_SMI_CHECK);
__ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
__ li(scratch, Operand(kPointerSize >> 1));
@ -1136,7 +1030,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&slow, ne, a3, Operand(at));
__ sra(a2, a0, kSmiTagSize);
GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1);
__ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
__ Ret();
// Slow case, key and receiver still in a0 and a1.
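
The CheckMap added in GenerateUnmappedArgumentsLookup above guards against a non-FixedArray backing store before its length field is read; in rough C++ terms (illustrative, not emitted code) the guard is:

    // Bail to the slow case unless the backing store is a plain FixedArray.
    bool BackingStoreIsFixedArray(HeapObject* backing_store, Heap* heap) {
      return backing_store->map() == heap->fixed_array_map();
    }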

536
deps/v8/src/mips/macro-assembler-mips.cc

@ -50,87 +50,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
}
// Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
#define REGISTER_TARGET_BODY(Name) \
void MacroAssembler::Name(Register target, \
BranchDelaySlot bd) { \
Name(Operand(target), bd); \
} \
void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
BranchDelaySlot bd) { \
Name(Operand(target), COND_ARGS, bd); \
}
#define INT_PTR_TARGET_BODY(Name) \
void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
BranchDelaySlot bd) { \
Name(Operand(target, rmode), bd); \
} \
void MacroAssembler::Name(intptr_t target, \
RelocInfo::Mode rmode, \
COND_TYPED_ARGS, \
BranchDelaySlot bd) { \
Name(Operand(target, rmode), COND_ARGS, bd); \
}
#define BYTE_PTR_TARGET_BODY(Name) \
void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
BranchDelaySlot bd) { \
Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
} \
void MacroAssembler::Name(byte* target, \
RelocInfo::Mode rmode, \
COND_TYPED_ARGS, \
BranchDelaySlot bd) { \
Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
}
#define CODE_TARGET_BODY(Name) \
void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
BranchDelaySlot bd) { \
Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
} \
void MacroAssembler::Name(Handle<Code> target, \
RelocInfo::Mode rmode, \
COND_TYPED_ARGS, \
BranchDelaySlot bd) { \
Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
}
REGISTER_TARGET_BODY(Jump)
REGISTER_TARGET_BODY(Call)
INT_PTR_TARGET_BODY(Jump)
INT_PTR_TARGET_BODY(Call)
BYTE_PTR_TARGET_BODY(Jump)
BYTE_PTR_TARGET_BODY(Call)
CODE_TARGET_BODY(Jump)
CODE_TARGET_BODY(Call)
#undef COND_TYPED_ARGS
#undef COND_ARGS
#undef REGISTER_TARGET_BODY
#undef BYTE_PTR_TARGET_BODY
#undef CODE_TARGET_BODY
void MacroAssembler::Ret(BranchDelaySlot bd) {
Jump(Operand(ra), bd);
}
void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
BranchDelaySlot bd) {
Jump(Operand(ra), cond, r1, r2, bd);
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
@ -424,6 +343,114 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'key' or 'result'.
// Unchanged on bailout so 'key' or 'result' can be used
// in further computation.
//
// Scratch registers:
//
// reg0 - holds the untagged key on entry and holds the hash once computed.
//
// reg1 - Used to hold the capacity mask of the dictionary.
//
// reg2 - Used for the index into the dictionary.
// at - Temporary (avoid MacroAssembler instructions also using 'at').
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
nor(reg1, reg0, zero_reg);
sll(at, reg0, 15);
addu(reg0, reg1, at);
// hash = hash ^ (hash >> 12);
srl(at, reg0, 12);
xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
sll(at, reg0, 2);
addu(reg0, reg0, at);
// hash = hash ^ (hash >> 4);
srl(at, reg0, 4);
xor_(reg0, reg0, at);
// hash = hash * 2057;
li(reg1, Operand(2057));
mul(reg0, reg0, reg1);
// hash = hash ^ (hash >> 16);
srl(at, reg0, 16);
xor_(reg0, reg0, at);
// Compute the capacity mask.
lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
sra(reg1, reg1, kSmiTagSize);
Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i)));
}
and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
ASSERT(NumberDictionary::kEntrySize == 3);
sll(at, reg2, 1); // 2x.
addu(reg2, reg2, at); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
sll(at, reg2, kPointerSizeLog2);
addu(reg2, elements, at);
lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset));
if (i != kProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
}
}
bind(&done);
// Check that the value is a normal property.
// reg2: elements + (index * kPointerSize).
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
lw(result, FieldMemOperand(reg2, kValueOffset));
}
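
The unrolled probe loop above depends on this hash; for reference, the step comments spell out the same function as ComputeIntegerHash in utils.h, which in plain C++ reads:

    // Plain C++ rendering of the annotated hash steps; the assembly above
    // must stay in sync with ComputeIntegerHash in utils.h.
    static uint32_t ComputeIntegerHashReference(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }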
// ---------------------------------------------------------------------------
// Instruction macros.
@ -1901,224 +1928,231 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
}
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
void MacroAssembler::Jump(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm28;
imm28 = jump_address(L);
imm28 &= kImm28Mask;
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
j(imm28);
if (cond == cc_always) {
jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jr(target);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
if (bd == PROTECT)
nop();
}
void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
void MacroAssembler::Jump(intptr_t target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
li(t9, Operand(target, rmode));
Jump(t9, cond, rs, rt, bd);
}
jr(at);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::Jump(Address target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
void MacroAssembler::Jump(Handle<Code> code,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
jalr(at);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
int MacroAssembler::CallSize(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
int size = 0;
void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target.is_reg()) {
jr(target.rm());
} else {
if (!MustUseReg(target.rmode_)) {
j(target.imm32_);
if (cond == cc_always) {
size += 1;
} else {
li(t9, target);
jr(t9);
}
size += 3;
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
if (bd == PROTECT)
size += 1;
return size * kInstrSize;
}
void MacroAssembler::Jump(const Operand& target,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
// Note: To call gcc-compiled C code on mips, you must call thru t9.
void MacroAssembler::Call(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
Label start;
bind(&start);
if (cond == cc_always) {
jr(target.rm());
} else {
Branch(2, NegateCondition(cond), rs, rt);
jr(target.rm());
}
} else { // Not register target.
if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
j(target.imm32_);
} else {
Branch(2, NegateCondition(cond), rs, rt);
j(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseReg(target).
li(t9, target);
if (cond == cc_always) {
jr(t9);
jalr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jr(t9); // Will generate only one instruction.
}
}
jalr(target);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
if (bd == PROTECT)
nop();
}
int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
return 4 * kInstrSize;
ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Register reg) {
return 2 * kInstrSize;
int MacroAssembler::CallSize(Address target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, cond, rs, rt, bd);
return size + 2 * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target.is_reg()) {
jalr(target.rm());
} else { // !target.is_reg().
if (!MustUseReg(target.rmode_)) {
jal(target.imm32_);
} else { // MustUseReg(target).
Label start;
bind(&start);
int32_t target_int = reinterpret_cast<int32_t>(target);
// Must record previous source positions before the
// li() generates a new code target.
positions_recorder()->WriteRecordedPositions();
li(t9, target);
jalr(t9);
}
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
li(t9, Operand(target_int, rmode), true);
Call(t9, cond, rs, rt, bd);
ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
void MacroAssembler::Call(const Operand& target,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jalr(target.rm());
} else {
Branch(2, NegateCondition(cond), rs, rt);
jalr(target.rm());
}
} else { // !target.is_reg().
if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
jal(target.imm32_);
} else {
Branch(2, NegateCondition(cond), rs, rt);
jal(target.imm32_); // Will generate only one instruction.
}
} else { // MustUseReg(target)
li(t9, target);
if (cond == cc_always) {
jalr(t9);
} else {
Branch(2, NegateCondition(cond), rs, rt);
jalr(t9); // Will generate only one instruction.
}
}
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
return CallSize(reinterpret_cast<Address>(code.location()),
rmode, cond, rs, rt, bd);
}
void MacroAssembler::CallWithAstId(Handle<Code> code,
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
Condition cond,
Register r1,
const Operand& r2) {
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
ASSERT(ast_id_for_reloc_info_ == kNoASTId);
ast_id_for_reloc_info_ = ast_id;
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::Drop(int count,
Condition cond,
Register reg,
const Operand& op) {
if (count <= 0) {
return;
void MacroAssembler::Ret(Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
}
Label skip;
if (cond != al) {
Branch(&skip, NegateCondition(cond), reg, op);
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm28;
imm28 = jump_address(L);
imm28 &= kImm28Mask;
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
j(imm28);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
if (count > 0) {
addiu(sp, sp, count * kPointerSize);
void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
jr(at);
if (cond != al) {
bind(&skip);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
jalr(at);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
@ -2143,6 +2177,29 @@ void MacroAssembler::DropAndRet(int drop,
}
void MacroAssembler::Drop(int count,
Condition cond,
Register reg,
const Operand& op) {
if (count <= 0) {
return;
}
Label skip;
if (cond != al) {
Branch(&skip, NegateCondition(cond), reg, op);
}
addiu(sp, sp, count * kPointerSize);
if (cond != al) {
bind(&skip);
}
}
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
@ -2804,7 +2861,7 @@ void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
}
@ -2979,9 +3036,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(t1, call_kind);
Call(adaptor, RelocInfo::CODE_TARGET);
Call(adaptor);
call_wrapper.AfterCall();
jmp(done);
} else {
@ -3178,7 +3235,7 @@ void MacroAssembler::GetObjectType(Register object,
void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
Register r1, const Operand& r2) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2);
Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
}
@ -3189,7 +3246,8 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
kNoASTId, cond, r1, r2);
return result;
}

104
deps/v8/src/mips/macro-assembler-mips.h

@ -103,39 +103,6 @@ class MacroAssembler: public Assembler {
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2
// Prototypes.
// Prototypes for functions with no target (eg Ret()).
#define DECLARE_NOTARGET_PROTOTYPE(Name) \
void Name(BranchDelaySlot bd = PROTECT); \
void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
Name(COND_ARGS, bd); \
}
// Prototypes for functions with a target.
// Cases when relocation may be needed.
#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, \
RelocInfo::Mode rmode, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, \
target_type target, \
RelocInfo::Mode rmode) { \
Name(target, rmode, bd); \
} \
void Name(target_type target, \
RelocInfo::Mode rmode, \
COND_TYPED_ARGS, \
BranchDelaySlot bd = PROTECT); \
inline void Name(BranchDelaySlot bd, \
target_type target, \
RelocInfo::Mode rmode, \
COND_TYPED_ARGS) { \
Name(target, rmode, COND_ARGS, bd); \
}
// Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
void Name(target_type target, BranchDelaySlot bd = PROTECT); \
@ -151,44 +118,44 @@ class MacroAssembler: public Assembler {
Name(target, COND_ARGS, bd); \
}
// Target prototypes.
#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Register) \
DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
DECLARE_RELOC_PROTOTYPE(Name, byte*) \
DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
#define DECLARE_BRANCH_PROTOTYPES(Name) \
DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
DECLARE_JUMP_CALL_PROTOTYPES(Jump)
DECLARE_JUMP_CALL_PROTOTYPES(Call)
DECLARE_BRANCH_PROTOTYPES(Branch)
DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
DECLARE_NOTARGET_PROTOTYPE(Ret)
#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS
#undef DECLARE_NOTARGET_PROTOTYPE
#undef DECLARE_NORELOC_PROTOTYPE
#undef DECLARE_RELOC_PROTOTYPE
#undef DECLARE_JUMP_CALL_PROTOTYPES
#undef DECLARE_BRANCH_PROTOTYPES
void CallWithAstId(Handle<Code> code,
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void Jump(Register target, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Register target, COND_ARGS);
void Call(Register target, COND_ARGS);
int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg));
COND_ARGS);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
COND_ARGS);
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd) {
Ret(al, zero_reg, Operand(zero_reg), bd);
}
int CallSize(Register reg);
int CallSize(Handle<Code> code, RelocInfo::Mode rmode);
#undef COND_ARGS
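
The COND_ARGS define above packs the defaulted condition/operand/delay-slot tail into every Jump/Call/Ret declaration, so each prototype expands to, e.g.:

    // Expansion of one declaration under the new COND_ARGS scheme.
    void Call(Register target,
              Condition cond = al,
              Register rs = zero_reg,
              const Operand& rt = Operand(zero_reg),
              BranchDelaySlot bd = PROTECT);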
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
@ -299,6 +266,16 @@ DECLARE_NOTARGET_PROTOTYPE(Ret)
Register scratch,
Label* miss);
void LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2);
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@ -1125,17 +1102,6 @@ DECLARE_NOTARGET_PROTOTYPE(Ret)
void Jr(Label* L, BranchDelaySlot bdslot);
void Jalr(Label* L, BranchDelaySlot bdslot);
void Jump(intptr_t target, RelocInfo::Mode rmode,
BranchDelaySlot bd = PROTECT);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
BranchDelaySlot bd = PROTECT);
void Call(intptr_t target, RelocInfo::Mode rmode,
BranchDelaySlot bd = PROTECT);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,

4
deps/v8/src/mips/regexp-macro-assembler-mips.cc

@ -179,7 +179,7 @@ void RegExpMacroAssemblerMIPS::Backtrack() {
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(a0);
__ Addu(a0, a0, code_pointer());
__ Jump(Operand(a0));
__ Jump(a0);
}
@ -1238,7 +1238,7 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
__ Call(t9);
__ lw(ra, MemOperand(sp, 0));
__ Addu(sp, sp, Operand(stack_alignment));
__ Jump(Operand(ra));
__ Jump(ra);
}

7
deps/v8/src/mips/regexp-macro-assembler-mips.h

@ -29,6 +29,12 @@
#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#include "mips/assembler-mips.h"
#include "mips/assembler-mips-inl.h"
#include "macro-assembler.h"
#include "code.h"
#include "mips/macro-assembler-mips.h"
namespace v8 {
namespace internal {
@ -249,4 +255,3 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
}} // namespace v8::internal
#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_

58
deps/v8/src/mips/stub-cache-mips.cc

@ -3099,7 +3099,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// -- a1 : receiver
// -----------------------------------
Code* stub;
MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map);
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(a1,
a2,
@ -3190,7 +3191,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// -- a3 : scratch
// -----------------------------------
Code* stub;
MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map);
JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(a2,
a3,
@ -3390,6 +3394,54 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
#define __ ACCESS_MASM(masm)
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Label slow, miss_force_generic;
Register key = a0;
Register receiver = a1;
__ JumpIfNotSmi(key, &miss_force_generic);
__ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ sra(a2, a0, kSmiTagSize);
__ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
__ Ret();
// Slow case, key and receiver still in a0 and a1.
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, a2, a3);
// Entry registers are intact.
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ Jump(slow_ic, RelocInfo::CODE_TARGET);
// Miss case, call the runtime.
__ bind(&miss_force_generic);
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
}
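GenerateLoadDictionaryElement takes the smi fast path into LoadFromNumberDictionary, which emits an open-addressed hash probe over the slow-mode elements, falling through to the runtime on a miss. A hedged sketch of the lookup it generates, with a toy hash and made-up constants standing in for V8's (the real probe count and hash live in the macro assembler and heap code):

    #include <cstdint>
    #include <cstdio>

    struct Entry { uint32_t key; int value; bool used; };

    static const int kCapacity = 8;  // must be a power of two
    static const int kProbes   = 4;  // inline probes before the slow path

    bool Probe(const Entry table[kCapacity], uint32_t key, int* out) {
      uint32_t mask = kCapacity - 1;
      uint32_t hash = key * 2654435761u;  // toy multiplicative hash
      for (int n = 0; n < kProbes; ++n) {
        // Triangular-number step sequence, wrapped by the capacity mask.
        uint32_t index = (hash + (n + n * n) / 2) & mask;
        const Entry& e = table[index];
        if (e.used && e.key == key) { *out = e.value; return true; }
      }
      return false;  // would fall through to the runtime slow path
    }

    int main() {
      Entry table[kCapacity] = {};
      table[(5u * 2654435761u) & (kCapacity - 1)] = {5u, 42, true};
      int value = 0;
      if (Probe(table, 5u, &value)) std::printf("hit: %d\n", value);
    }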
static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
switch (elements_kind) {
case JSObject::EXTERNAL_BYTE_ELEMENTS:
@ -4201,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
__ JumpIfNotSmi(a0, &miss_force_generic);
__ JumpIfNotSmi(key_reg, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ lw(elements_reg,

3
deps/v8/src/mksnapshot.cc

@ -296,10 +296,9 @@ class BZip2Decompressor : public StartupDataDecompressor {
int main(int argc, char** argv) {
#ifdef ENABLE_LOGGING_AND_PROFILING
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
#endif
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);

16
deps/v8/src/objects-visiting.h

@ -30,6 +30,22 @@
#include "allocation.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/assembler-ia32.h"
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64.h"
#include "x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips.h"
#include "mips/assembler-mips-inl.h"
#else
#error Unsupported target architecture.
#endif
// This file provides base classes and auxiliary methods for defining
// static object visitors used during GC.
// Visiting HeapObject body with a normal ObjectVisitor requires performing

107
deps/v8/src/objects.cc

@ -1883,13 +1883,9 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
pt = pt->GetPrototype()) {
JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
if (result->IsProperty()) {
if (result->IsReadOnly()) {
result->NotFound();
return;
}
if (result->type() == CALLBACKS) {
return;
}
if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
// Found non-callback or read-only callback, stop looking.
break;
}
}
result->NotFound();
@ -3068,7 +3064,9 @@ MaybeObject* JSObject::DeleteDictionaryElement(uint32_t index,
Isolate* isolate = GetIsolate();
Heap* heap = isolate->heap();
FixedArray* backing_store = FixedArray::cast(elements());
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
bool is_arguments =
(GetElementsKind() == JSObject::NON_STRICT_ARGUMENTS_ELEMENTS);
if (is_arguments) {
backing_store = FixedArray::cast(backing_store->get(1));
}
NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
@ -3081,8 +3079,12 @@ MaybeObject* JSObject::DeleteDictionaryElement(uint32_t index,
if (!maybe_elements->To(&new_elements)) {
return maybe_elements;
}
if (is_arguments) {
FixedArray::cast(elements())->set(1, new_elements);
} else {
set_elements(new_elements);
}
}
if (mode == STRICT_DELETION && result == heap->false_value()) {
// In strict mode, attempting to delete a non-configurable property
// throws an exception.
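The is_arguments branches only make sense against the layout of a non-strict arguments object's elements. A sketch of that layout, reconstructed from the get(1) unwrap and the i + 2 indexing used throughout this diff (not quoted from the source):

    // elements()  --> FixedArray "parameter map"
    //   [0] : context holding the aliased variables
    //   [1] : backing store (fast FixedArray or NumberDictionary)
    //   [2] : slot for argument 0 (the-hole if unmapped)
    //   [3] : slot for argument 1
    //   ...
    //
    // DeleteDictionaryElement therefore unwraps get(1) before treating
    // the store as a NumberDictionary, and writes a reallocated
    // dictionary back into slot 1 rather than into elements() itself.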
@ -3375,23 +3377,22 @@ MaybeObject* JSObject::PreventExtensions() {
}
// If there are fast elements we normalize.
if (HasFastElements()) {
MaybeObject* result = NormalizeElements();
if (result->IsFailure()) return result;
NumberDictionary* dictionary = NULL;
{ MaybeObject* maybe = NormalizeElements();
if (!maybe->To<NumberDictionary>(&dictionary)) return maybe;
}
// TODO(kmillikin): Handle arguments object with dictionary elements.
ASSERT(HasDictionaryElements());
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
// Make sure that we never go back to fast case.
element_dictionary()->set_requires_slow_elements();
dictionary->set_requires_slow_elements();
// Do a map transition, other objects with this map may still
// be extensible.
Object* new_map;
{ MaybeObject* maybe_new_map = map()->CopyDropTransitions();
if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
Map* new_map;
{ MaybeObject* maybe = map()->CopyDropTransitions();
if (!maybe->To<Map>(&new_map)) return maybe;
}
Map::cast(new_map)->set_is_extensible(false);
set_map(Map::cast(new_map));
new_map->set_is_extensible(false);
set_map(new_map);
ASSERT(!map()->is_extensible());
return new_map;
}
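PreventExtensions also shows the allocation-failure protocol this change tightens up: every allocating call returns a MaybeObject*, and To<T>() both tests for failure and unwraps on success, so a failure propagates by returning the same MaybeObject*. A minimal sketch of the idiom with toy types (the real classes live in objects.h):

    #include <cstdio>

    struct MaybeObject {
      bool failed;
      template <typename T> bool To(T** out) {
        if (failed) return false;
        *out = reinterpret_cast<T*>(this);  // toy unwrap, never dereferenced
        return true;
      }
    };

    struct Map {};

    MaybeObject* CopyDropTransitions(bool fail) {
      static MaybeObject ok = {false}, oom = {true};
      return fail ? &oom : &ok;
    }

    MaybeObject* PreventExtensionsSketch(bool fail) {
      Map* new_map = NULL;
      { MaybeObject* maybe = CopyDropTransitions(fail);
        if (!maybe->To<Map>(&new_map)) return maybe;  // propagate failure
      }
      // ... would now mutate new_map and install it ...
      return CopyDropTransitions(false);
    }

    int main() {
      std::printf("%s\n", PreventExtensionsSketch(true)->failed
                              ? "failure propagated" : "ok");
    }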
@ -4117,6 +4118,8 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
}
}
if (!map_done) continue;
} else {
map_or_index_field = NULL;
}
// That was the regular transitions, now for the prototype transitions.
FixedArray* prototype_transitions =
@ -9428,7 +9431,7 @@ void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
}
ASSERT(storage->length() >= index);
} else {
property_dictionary()->CopyKeysTo(storage);
property_dictionary()->CopyKeysTo(storage, StringDictionary::UNSORTED);
}
}
@ -9505,33 +9508,49 @@ int JSObject::GetLocalElementKeys(FixedArray* storage,
break;
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage, filter);
element_dictionary()->CopyKeysTo(storage,
filter,
NumberDictionary::SORTED);
}
counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
break;
}
case NON_STRICT_ARGUMENTS_ELEMENTS: {
FixedArray* parameter_map = FixedArray::cast(elements());
int length = parameter_map->length();
for (int i = 2; i < length; ++i) {
if (!parameter_map->get(i)->IsTheHole()) {
if (storage != NULL) storage->set(i - 2, Smi::FromInt(i - 2));
++counter;
}
}
int mapped_length = parameter_map->length() - 2;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
if (arguments->IsDictionary()) {
// Copy the keys from arguments first, because Dictionary::CopyKeysTo
// will insert in storage starting at index 0.
NumberDictionary* dictionary = NumberDictionary::cast(arguments);
if (storage != NULL) dictionary->CopyKeysTo(storage, filter);
if (storage != NULL) {
dictionary->CopyKeysTo(storage, filter, NumberDictionary::UNSORTED);
}
counter += dictionary->NumberOfElementsFilterAttributes(filter);
for (int i = 0; i < mapped_length; ++i) {
if (!parameter_map->get(i + 2)->IsTheHole()) {
if (storage != NULL) storage->set(counter, Smi::FromInt(i));
++counter;
}
}
if (storage != NULL) storage->SortPairs(storage, counter);
} else {
int length = arguments->length();
for (int i = 0; i < length; ++i) {
if (!arguments->get(i)->IsTheHole()) {
if (storage != NULL) storage->set(i, Smi::FromInt(i));
int backing_length = arguments->length();
int i = 0;
for (; i < mapped_length; ++i) {
if (!parameter_map->get(i + 2)->IsTheHole()) {
if (storage != NULL) storage->set(counter, Smi::FromInt(i));
++counter;
} else if (i < backing_length && !arguments->get(i)->IsTheHole()) {
if (storage != NULL) storage->set(counter, Smi::FromInt(i));
++counter;
}
}
for (; i < backing_length; ++i) {
if (storage != NULL) storage->set(counter, Smi::FromInt(i));
++counter;
}
}
break;
}
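The fast-elements branch above merges two key sources: a mapped parameter wins, the backing store fills gaps at the same index, and trailing backing-store entries past the map follow. A toy model of that merge, assuming ints stand in for Smis and -1 for the hole (the tail loop mirrors the code above in skipping the hole check):

    #include <cstdio>
    #include <vector>

    int main() {
      // parameter_map slots 2.. as a plain vector; -1 == the-hole.
      std::vector<int> mapped  = {0, -1, 2};         // indices 0 and 2 mapped
      std::vector<int> backing = {-1, 11, -1, 33};   // fast backing store
      std::vector<int> keys;

      int mapped_length  = static_cast<int>(mapped.size());
      int backing_length = static_cast<int>(backing.size());
      int i = 0;
      // Range covered by the parameter map: mapped entry wins, else
      // fall back to the backing store at the same index.
      for (; i < mapped_length; ++i) {
        if (mapped[i] != -1) keys.push_back(i);
        else if (i < backing_length && backing[i] != -1) keys.push_back(i);
      }
      // Remaining backing-store entries past the map (no hole check,
      // as in the code above).
      for (; i < backing_length; ++i) keys.push_back(i);

      for (size_t j = 0; j < keys.size(); ++j) std::printf("%d ", keys[j]);
      // prints: 0 1 2 3
    }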
@ -10132,7 +10151,9 @@ template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup(
Object*);
template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo(
FixedArray*, PropertyAttributes);
FixedArray*,
PropertyAttributes,
Dictionary<NumberDictionaryShape, uint32_t>::SortMode);
template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty(
int, JSObject::DeleteMode);
@ -10147,7 +10168,8 @@ template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Shrink(
uint32_t);
template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo(
FixedArray*);
FixedArray*,
Dictionary<StringDictionaryShape, String*>::SortMode);
template int
Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes(
@ -11199,8 +11221,10 @@ int Dictionary<Shape, Key>::NumberOfEnumElements() {
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
PropertyAttributes filter) {
void Dictionary<Shape, Key>::CopyKeysTo(
FixedArray* storage,
PropertyAttributes filter,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
ASSERT(storage->length() >= NumberOfEnumElements());
int capacity = HashTable<Shape, Key>::Capacity();
int index = 0;
@ -11213,7 +11237,9 @@ void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage,
if ((attr & filter) == 0) storage->set(index++, k);
}
}
if (sort_mode == Dictionary<Shape, Key>::SORTED) {
storage->SortPairs(storage, index);
}
ASSERT(storage->length() >= index);
}
@ -11239,7 +11265,9 @@ void StringDictionary::CopyEnumKeysTo(FixedArray* storage,
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) {
void Dictionary<Shape, Key>::CopyKeysTo(
FixedArray* storage,
typename Dictionary<Shape, Key>::SortMode sort_mode) {
ASSERT(storage->length() >= NumberOfElementsFilterAttributes(
static_cast<PropertyAttributes>(NONE)));
int capacity = HashTable<Shape, Key>::Capacity();
@ -11252,6 +11280,9 @@ void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) {
storage->set(index++, k);
}
}
if (sort_mode == Dictionary<Shape, Key>::SORTED) {
storage->SortPairs(storage, index);
}
ASSERT(storage->length() >= index);
}
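The new SortMode parameter exists because dictionaries iterate in hash order: callers that hand the keys straight out request SORTED, while the arguments-object path above requests UNSORTED and sorts after merging. A small sketch of the contract (toy types, std::vector standing in for FixedArray):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    enum SortMode { UNSORTED, SORTED };

    void CopyKeysTo(const std::vector<int>& hash_order_keys,
                    std::vector<int>* storage, SortMode sort_mode) {
      // Keys arrive in hash order; sort only if the caller asked for it.
      for (size_t i = 0; i < hash_order_keys.size(); ++i)
        storage->push_back(hash_order_keys[i]);
      if (sort_mode == SORTED) std::sort(storage->begin(), storage->end());
    }

    int main() {
      std::vector<int> out;
      std::vector<int> hash_order;
      hash_order.push_back(7); hash_order.push_back(0); hash_order.push_back(3);
      CopyKeysTo(hash_order, &out, SORTED);
      for (size_t i = 0; i < out.size(); ++i) std::printf("%d ", out[i]);
      // prints: 0 3 7
    }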

7
deps/v8/src/objects.h

@ -2770,10 +2770,13 @@ class Dictionary: public HashTable<Shape, Key> {
// Returns the number of enumerable elements in the dictionary.
int NumberOfEnumElements();
enum SortMode { UNSORTED, SORTED };
// Copies keys to preallocated fixed array.
void CopyKeysTo(FixedArray* storage, PropertyAttributes filter);
void CopyKeysTo(FixedArray* storage,
PropertyAttributes filter,
SortMode sort_mode);
// Fill in details for properties into storage.
void CopyKeysTo(FixedArray* storage);
void CopyKeysTo(FixedArray* storage, SortMode sort_mode);
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {

36
deps/v8/src/parser.cc

@ -823,14 +823,24 @@ class ParserFinder {
// form expr.a = ...; expr.b = ...; etc.
class InitializationBlockFinder : public ParserFinder {
public:
InitializationBlockFinder()
: first_in_block_(NULL), last_in_block_(NULL), block_size_(0) {}
// We find and mark the initialization blocks in top level
// non-looping code only. This is because the optimization prevents
// reuse of the map transitions, so it should be used only for code
// that will only be run once.
InitializationBlockFinder(Scope* top_scope, Target* target)
: enabled_(top_scope->DeclarationScope()->is_global_scope() &&
!IsLoopTarget(target)),
first_in_block_(NULL),
last_in_block_(NULL),
block_size_(0) {}
~InitializationBlockFinder() {
if (!enabled_) return;
if (InBlock()) EndBlock();
}
void Update(Statement* stat) {
if (!enabled_) return;
Assignment* assignment = AsAssignment(stat);
if (InBlock()) {
if (BlockContinues(assignment)) {
@ -851,6 +861,14 @@ class InitializationBlockFinder : public ParserFinder {
// the overhead exceeds the savings below this limit.
static const int kMinInitializationBlock = 3;
static bool IsLoopTarget(Target* target) {
while (target != NULL) {
if (target->node()->AsIterationStatement() != NULL) return true;
target = target->previous();
}
return false;
}
// Returns true if the expressions appear to denote the same object.
// In the context of initialization blocks, we only consider expressions
// of the form 'expr.x' or expr["x"].
@ -913,6 +931,7 @@ class InitializationBlockFinder : public ParserFinder {
bool InBlock() { return first_in_block_ != NULL; }
const bool enabled_;
Assignment* first_in_block_;
Assignment* last_in_block_;
int block_size_;
@ -1078,7 +1097,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
TargetScope scope(&this->target_stack_);
ASSERT(processor != NULL);
InitializationBlockFinder block_finder;
InitializationBlockFinder block_finder(top_scope_, target_stack_);
ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate());
bool directive_prologue = true; // Parsing directive prologue.
@ -1133,12 +1152,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
}
}
// We find and mark the initialization blocks on top level code only.
// This is because the optimization prevents reuse of the map transitions,
// so it should be used only for code that will only be run once.
if (top_scope_->is_global_scope()) {
block_finder.Update(stat);
}
// Find and mark all assignments to named properties in this (this.x =)
if (top_scope_->is_function_scope()) {
this_property_assignment_finder.Update(top_scope_, stat);
@ -1478,9 +1492,13 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
Block* result = new(zone()) Block(labels, 16, false);
Target target(&this->target_stack_, result);
Expect(Token::LBRACE, CHECK_OK);
InitializationBlockFinder block_finder(top_scope_, target_stack_);
while (peek() != Token::RBRACE) {
Statement* stat = ParseStatement(NULL, CHECK_OK);
if (stat && !stat->IsEmpty()) result->AddStatement(stat);
if (stat && !stat->IsEmpty()) {
result->AddStatement(stat);
block_finder.Update(stat);
}
}
Expect(Token::RBRACE, CHECK_OK);
return result;

23
deps/v8/src/platform-cygwin.cc

@ -166,23 +166,6 @@ void OS::Free(void* address, const size_t size) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
// TODO(1240712): mprotect has a return value which is ignored here.
mprotect(address, size, PROT_READ);
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): mprotect has a return value which is ignored here.
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
mprotect(address, size, prot);
}
#endif
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@ -249,7 +232,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@ -306,7 +288,6 @@ void OS::LogSharedLibraryAddresses() {
}
free(lib_name);
fclose(fp);
#endif
}
@ -591,8 +572,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------
// Cygwin profiler support.
//
@ -769,7 +748,5 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

21
deps/v8/src/platform-freebsd.cc

@ -181,20 +181,6 @@ void OS::Free(void* buf, const size_t length) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@ -266,15 +252,12 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
}
#endif
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@ -311,7 +294,6 @@ void OS::LogSharedLibraryAddresses() {
LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
#endif
}
@ -588,8 +570,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static pthread_t GetThreadID() {
pthread_t thread_id = pthread_self();
return thread_id;
@ -817,6 +797,5 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

24
deps/v8/src/platform-linux.cc

@ -390,23 +390,6 @@ void OS::Free(void* address, const size_t size) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
// TODO(1240712): mprotect has a return value which is ignored here.
mprotect(address, size, PROT_READ);
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): mprotect has a return value which is ignored here.
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
mprotect(address, size, prot);
}
#endif
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@ -483,7 +466,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
@ -540,7 +522,6 @@ void OS::LogSharedLibraryAddresses() {
}
free(lib_name);
fclose(fp);
#endif
}
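For reference, the maps-file layout described in the comment above parses straightforwardly. A hedged standalone sketch, simplified from what LogSharedLibraryAddresses does (executable mappings only, no library-name extraction):

    #include <cstddef>
    #include <cstdio>

    int main() {
      std::FILE* fp = std::fopen("/proc/self/maps", "r");
      if (fp == NULL) return 1;
      char line[1024];
      while (std::fgets(line, sizeof(line), fp) != NULL) {
        size_t start = 0, end = 0;
        char perms[5] = {0};
        // Layout: hex_start_addr-hex_end_addr rwxp <offset dev inode> [path]
        if (std::sscanf(line, "%zx-%zx %4s", &start, &end, perms) == 3 &&
            perms[2] == 'x') {
          std::printf("%s", line);  // executable mapping, e.g. a shared library
        }
      }
      std::fclose(fp);
    }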
@ -548,7 +529,6 @@ static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
void OS::SignalCodeMovingGC() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// Support for ll_prof.py.
//
// The Linux profiler built into the kernel logs all mmap's with
@ -564,7 +544,6 @@ void OS::SignalCodeMovingGC() {
ASSERT(addr != MAP_FAILED);
munmap(addr, size);
fclose(f);
#endif
}
@ -859,8 +838,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
// Android runs a fairly new Linux kernel, so signal info is there,
// but the C library doesn't have the structs defined.
@ -1148,6 +1125,5 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

19
deps/v8/src/platform-macos.cc

@ -169,20 +169,6 @@ void OS::Free(void* address, const size_t size) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
usleep(1000 * milliseconds);
}
@ -248,7 +234,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
@ -270,7 +255,6 @@ void OS::LogSharedLibraryAddresses() {
LOG(Isolate::Current(),
SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
#endif // ENABLE_LOGGING_AND_PROFILING
}
@ -644,8 +628,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler::PlatformData : public Malloced {
public:
PlatformData() : profiled_thread_(mach_thread_self()) {}
@ -821,6 +803,5 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

16
deps/v8/src/platform-nullos.cc

@ -217,20 +217,6 @@ void OS::Free(void* buf, const size_t length) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
UNIMPLEMENTED();
}
@ -437,7 +423,6 @@ Semaphore* OS::CreateSemaphore(int count) {
return new NullSemaphore(count);
}
#ifdef ENABLE_LOGGING_AND_PROFILING
class ProfileSampler::PlatformData : public Malloced {
public:
@ -472,6 +457,5 @@ void ProfileSampler::Stop() {
UNIMPLEMENTED();
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

21
deps/v8/src/platform-openbsd.cc

@ -179,20 +179,6 @@ void OS::Free(void* buf, const size_t length) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
UNIMPLEMENTED();
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
UNIMPLEMENTED();
}
#endif
void OS::Sleep(int milliseconds) {
unsigned int ms = static_cast<unsigned int>(milliseconds);
usleep(1000 * ms);
@ -264,15 +250,12 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static unsigned StringToLong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT
}
#endif
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
@ -309,7 +292,6 @@ void OS::LogSharedLibraryAddresses() {
LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
#endif
}
@ -590,8 +572,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static pthread_t GetThreadID() {
pthread_t thread_id = pthread_self();
return thread_id;
@ -818,6 +798,5 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

15
deps/v8/src/platform-posix.cc

@ -37,6 +37,7 @@
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <arpa/inet.h>
#include <netinet/in.h>
@ -130,7 +131,14 @@ int OS::GetLastError() {
//
FILE* OS::FOpen(const char* path, const char* mode) {
return fopen(path, mode);
FILE* file = fopen(path, mode);
if (file == NULL) return NULL;
struct stat file_stat;
if (fstat(fileno(file), &file_stat) != 0) {
  fclose(file);  // avoid leaking the handle when fstat fails
  return NULL;
}
bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
if (is_regular_file) return file;
fclose(file);
return NULL;
}
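Why the extra fstat: on POSIX systems plain fopen() will happily "open" a directory for reading, and callers would then try to read file data from it. A small demonstration mirroring the check added above (hypothetical path):

    #include <cstdio>
    #include <sys/stat.h>

    static bool IsRegularFile(FILE* file) {
      struct stat file_stat;
      if (fstat(fileno(file), &file_stat) != 0) return false;
      return (file_stat.st_mode & S_IFREG) != 0;  // same test as the patch
    }

    int main() {
      FILE* dir = fopen("/tmp", "r");  // often succeeds on POSIX
      if (dir != NULL) {
        std::printf("regular file? %s\n", IsRegularFile(dir) ? "yes" : "no");
        fclose(dir);
      }
    }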
@ -139,6 +147,11 @@ bool OS::Remove(const char* path) {
}
FILE* OS::OpenTemporaryFile() {
return tmpfile();
}
const char* const OS::LogFileOpenMode = "w";

21
deps/v8/src/platform-solaris.cc

@ -192,23 +192,6 @@ void OS::Free(void* address, const size_t size) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
// TODO(1240712): mprotect has a return value which is ignored here.
mprotect(address, size, PROT_READ);
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): mprotect has a return value which is ignored here.
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
mprotect(address, size, prot);
}
#endif
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
@ -589,8 +572,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static pthread_t GetThreadID() {
return pthread_self();
}
@ -817,6 +798,4 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

2
deps/v8/src/platform-tls.h

@ -30,7 +30,7 @@
#ifndef V8_PLATFORM_TLS_H_
#define V8_PLATFORM_TLS_H_
#ifdef V8_FAST_TLS
#ifndef V8_NO_FAST_TLS
// When fast TLS is requested we include the appropriate
// implementation header.

50
deps/v8/src/platform-win32.cc

@ -44,11 +44,6 @@
namespace v8 {
namespace internal {
intptr_t OS::MaxVirtualMemory() {
return 0;
}
// Test for finite value - usually defined in math.h
int isfinite(double x) {
return _finite(x);
@ -206,6 +201,11 @@ int random() {
namespace v8 {
namespace internal {
intptr_t OS::MaxVirtualMemory() {
return 0;
}
double ceiling(double x) {
return ceil(x);
}
@ -749,6 +749,24 @@ bool OS::Remove(const char* path) {
}
FILE* OS::OpenTemporaryFile() {
// tmpfile_s tries to use the root dir, don't use it.
char tempPathBuffer[MAX_PATH];
DWORD path_result = 0;
path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
if (path_result > MAX_PATH || path_result == 0) return NULL;
UINT name_result = 0;
char tempNameBuffer[MAX_PATH];
name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
if (name_result == 0) return NULL;
FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses.
if (result != NULL) {
Remove(tempNameBuffer); // Delete on close.
}
return result;
}
// Open log file in binary mode to avoid \n -> \r\n conversion.
const char* const OS::LogFileOpenMode = "wb";
@ -948,25 +966,6 @@ void OS::Free(void* address, const size_t size) {
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
// TODO(1240712): VirtualProtect has a return value which is ignored here.
DWORD old_protect;
VirtualProtect(address, size, PAGE_READONLY, &old_protect);
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): VirtualProtect has a return value which is ignored here.
DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
DWORD old_protect;
VirtualProtect(address, size, new_protect, &old_protect);
}
#endif
void OS::Sleep(int milliseconds) {
::Sleep(milliseconds);
}
@ -1867,8 +1866,6 @@ Socket* OS::CreateSocket() {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------
// Win32 profiler support.
@ -2043,6 +2040,5 @@ void Sampler::Stop() {
SetActive(false);
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

12
deps/v8/src/platform.h

@ -177,6 +177,9 @@ class OS {
static FILE* FOpen(const char* path, const char* mode);
static bool Remove(const char* path);
// Opens a temporary file; the file is automatically removed on close.
static FILE* OpenTemporaryFile();
// Log file open mode is platform-dependent due to line-ending issues.
static const char* const LogFileOpenMode;
@ -206,12 +209,6 @@ class OS {
// Get the Alignment guaranteed by Allocate().
static size_t AllocateAlignment();
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static void Protect(void* address, size_t size);
static void Unprotect(void* address, size_t size, bool is_executable);
#endif
// Returns an indication of whether a pointer is in a space that
// has been allocated by Allocate(). This method may conservatively
// always return false, but giving more accurate information may
@ -603,7 +600,6 @@ class TickSample {
bool has_external_callback : 1;
};
#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler {
public:
// Initialize sampler.
@ -662,8 +658,6 @@ class Sampler {
};
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
#endif // V8_PLATFORM_H_

4
deps/v8/src/profile-generator-inl.h

@ -28,8 +28,6 @@
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "profile-generator.h"
namespace v8 {
@ -123,6 +121,4 @@ uint64_t HeapEntry::id() {
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
#endif // V8_PROFILE_GENERATOR_INL_H_

4
deps/v8/src/profile-generator.cc

@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "v8.h"
#include "profile-generator-inl.h"
@ -3259,5 +3257,3 @@ String* GetConstructorNameForHeapProfile(JSObject* object) {
}
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING

4
deps/v8/src/profile-generator.h

@ -28,8 +28,6 @@
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "allocation.h"
#include "hashmap.h"
#include "../include/v8-profiler.h"
@ -1126,6 +1124,4 @@ String* GetConstructorNameForHeapProfile(JSObject* object);
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING
#endif // V8_PROFILE_GENERATOR_H_

1
deps/v8/src/property.h

@ -206,6 +206,7 @@ class LookupResult BASE_EMBEDDED {
lookup_type_ = HANDLER_TYPE;
holder_ = NULL;
details_ = PropertyDetails(NONE, HANDLER);
cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {

12
deps/v8/src/proxy.js

@ -135,3 +135,15 @@ function DerivedSetTrap(receiver, name, val) {
function DerivedHasTrap(name) {
return !!this.getPropertyDescriptor(name)
}
function DerivedKeysTrap() {
var names = this.getOwnPropertyNames()
var enumerableNames = []
for (var i = 0, count = 0; i < names.length; ++i) {
var name = names[i]
if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
enumerableNames[count++] = names[i]
}
}
return enumerableNames
}

Some files were not shown because too many files changed in this diff
