
Upgrade V8 to 2.2.11

v0.7.4-release
Ryan Dahl, 15 years ago
commit 2b34363d03
  1. deps/v8/ChangeLog (28)
  2. deps/v8/SConstruct (15)
  3. deps/v8/benchmarks/README.txt (8)
  4. deps/v8/benchmarks/base.js (2)
  5. deps/v8/benchmarks/raytrace.js (31)
  6. deps/v8/benchmarks/revisions.html (7)
  7. deps/v8/benchmarks/run.html (6)
  8. deps/v8/benchmarks/splay.js (11)
  9. deps/v8/include/v8-debug.h (48)
  10. deps/v8/include/v8-profiler.h (25)
  11. deps/v8/samples/shell.cc (5)
  12. deps/v8/src/SConscript (5)
  13. deps/v8/src/api.cc (55)
  14. deps/v8/src/arm/assembler-arm-inl.h (7)
  15. deps/v8/src/arm/assembler-arm.cc (321)
  16. deps/v8/src/arm/assembler-arm.h (29)
  17. deps/v8/src/arm/assembler-thumb2-inl.h (263)
  18. deps/v8/src/arm/assembler-thumb2.cc (1878)
  19. deps/v8/src/arm/assembler-thumb2.h (1036)
  20. deps/v8/src/arm/builtins-arm.cc (6)
  21. deps/v8/src/arm/codegen-arm.cc (513)
  22. deps/v8/src/arm/codegen-arm.h (16)
  23. deps/v8/src/arm/constants-arm.cc (4)
  24. deps/v8/src/arm/cpu-arm.cc (4)
  25. deps/v8/src/arm/debug-arm.cc (11)
  26. deps/v8/src/arm/disasm-arm.cc (4)
  27. deps/v8/src/arm/fast-codegen-arm.cc (4)
  28. deps/v8/src/arm/frames-arm.cc (8)
  29. deps/v8/src/arm/full-codegen-arm.cc (170)
  30. deps/v8/src/arm/ic-arm.cc (401)
  31. deps/v8/src/arm/jump-target-arm.cc (132)
  32. deps/v8/src/arm/macro-assembler-arm.cc (4)
  33. deps/v8/src/arm/regexp-macro-assembler-arm.cc (5)
  34. deps/v8/src/arm/register-allocator-arm.cc (4)
  35. deps/v8/src/arm/simulator-arm.cc (4)
  36. deps/v8/src/arm/stub-cache-arm.cc (52)
  37. deps/v8/src/arm/virtual-frame-arm-inl.h (53)
  38. deps/v8/src/arm/virtual-frame-arm.cc (122)
  39. deps/v8/src/arm/virtual-frame-arm.h (106)
  40. deps/v8/src/assembler.cc (3)
  41. deps/v8/src/assembler.h (8)
  42. deps/v8/src/ast-inl.h (79)
  43. deps/v8/src/ast.cc (14)
  44. deps/v8/src/ast.h (42)
  45. deps/v8/src/bootstrapper.cc (2)
  46. deps/v8/src/builtins.cc (59)
  47. deps/v8/src/codegen.h (3)
  48. deps/v8/src/compiler.cc (16)
  49. deps/v8/src/cpu-profiler-inl.h (2)
  50. deps/v8/src/cpu-profiler.cc (47)
  51. deps/v8/src/cpu-profiler.h (16)
  52. deps/v8/src/d8.js (57)
  53. deps/v8/src/date.js (11)
  54. deps/v8/src/debug-debugger.js (31)
  55. deps/v8/src/debug.cc (72)
  56. deps/v8/src/debug.h (24)
  57. deps/v8/src/flag-definitions.h (17)
  58. deps/v8/src/full-codegen.cc (35)
  59. deps/v8/src/full-codegen.h (76)
  60. deps/v8/src/globals.h (18)
  61. deps/v8/src/heap.cc (142)
  62. deps/v8/src/heap.h (78)
  63. deps/v8/src/ia32/assembler-ia32-inl.h (5)
  64. deps/v8/src/ia32/assembler-ia32.cc (40)
  65. deps/v8/src/ia32/assembler-ia32.h (2)
  66. deps/v8/src/ia32/builtins-ia32.cc (29)
  67. deps/v8/src/ia32/codegen-ia32.cc (263)
  68. deps/v8/src/ia32/codegen-ia32.h (21)
  69. deps/v8/src/ia32/cpu-ia32.cc (4)
  70. deps/v8/src/ia32/debug-ia32.cc (4)
  71. deps/v8/src/ia32/disasm-ia32.cc (6)
  72. deps/v8/src/ia32/fast-codegen-ia32.cc (4)
  73. deps/v8/src/ia32/frames-ia32.cc (4)
  74. deps/v8/src/ia32/full-codegen-ia32.cc (1493)
  75. deps/v8/src/ia32/ic-ia32.cc (6)
  76. deps/v8/src/ia32/jump-target-ia32.cc (4)
  77. deps/v8/src/ia32/macro-assembler-ia32.cc (4)
  78. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (91)
  79. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (3)
  80. deps/v8/src/ia32/register-allocator-ia32.cc (4)
  81. deps/v8/src/ia32/stub-cache-ia32.cc (4)
  82. deps/v8/src/ia32/virtual-frame-ia32.cc (4)
  83. deps/v8/src/ia32/virtual-frame-ia32.h (40)
  84. deps/v8/src/jump-target-heavy.cc (63)
  85. deps/v8/src/jump-target-heavy.h (242)
  86. deps/v8/src/jump-target-light-inl.h (14)
  87. deps/v8/src/jump-target-light.cc (83)
  88. deps/v8/src/jump-target-light.h (187)
  89. deps/v8/src/jump-target.cc (64)
  90. deps/v8/src/jump-target.h (218)
  91. deps/v8/src/liveedit.cc (2)
  92. deps/v8/src/log.cc (2)
  93. deps/v8/src/macro-assembler.h (5)
  94. deps/v8/src/macros.py (5)
  95. deps/v8/src/mark-compact.cc (8)
  96. deps/v8/src/mips/assembler-mips.cc (4)
  97. deps/v8/src/mips/builtins-mips.cc (3)
  98. deps/v8/src/mips/codegen-mips.cc (4)
  99. deps/v8/src/mips/constants-mips.cc (5)
  100. deps/v8/src/mips/cpu-mips.cc (4)

deps/v8/ChangeLog (28)

@ -1,3 +1,29 @@
2010-05-21: Version 2.2.11
Fix crash bug in liveedit on 64 bit.
Use 'full compiler' when debugging is active. This should increase
the density of possible break points, making single step more fine
grained. This will only take effect for functions compiled after
debugging has been started, so recompilation of all functions is
required to get the full effect. IA32 and x64 only for now.
Misc. fixes to the Solaris build.
Add new flags --print-cumulative-gc-stat and --trace-gc-nvp.
Add filtering of CPU profiles by security context.
Fix crash bug on ARM when running without VFP2 or VFP3.
Incremental performance improvements in all backends.
2010-05-17: Version 2.2.10
Performance improvements in the x64 and ARM backends.
2010-05-10: Version 2.2.9
Allow Object.create to be called with a function (issue 697).
@ -6,7 +32,7 @@
non date string (issue 696).
Allow unaligned memory accesses on ARM targets that support it (by
Subrato K De of CodeAurora <subratokde@codeaurora.org>).
Subrato K De of CodeAurora <subratokde@codeaurora.org>).
C++ API for retrieving JavaScript stack trace information.
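The two GC statistics flags added in 2.2.11 can be enabled by an embedder before V8 runs any code. The following is a minimal sketch, not part of this commit; it assumes the standard V8 2.2.x embedding API (v8::V8::SetFlagsFromString), and only the flag names are taken from the ChangeLog above.

#include <v8.h>
#include <cstring>

int main() {
  // Set the flags before V8 executes anything so the statistics are
  // collected from the start (flag names from the ChangeLog entry above).
  const char flags[] = "--print-cumulative-gc-stat --trace-gc-nvp";
  v8::V8::SetFlagsFromString(flags, static_cast<int>(std::strlen(flags)));

  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);
  // ... compile and run scripts here; GC statistics are reported
  // according to the flags above ...
  context.Dispose();
  return 0;
}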

deps/v8/SConstruct (15)

@ -210,12 +210,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'],
'LINKFLAGS': ['-m32']
},
'armvariant:thumb2': {
'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
},
'armvariant:arm': {
'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'simulator:none': {
@ -764,11 +758,6 @@ SIMPLE_OPTIONS = {
'default': 'hidden',
'help': 'shared library symbol visibility'
},
'armvariant': {
'values': ['arm', 'thumb2', 'none'],
'default': 'none',
'help': 'generate thumb2 instructions instead of arm instructions (default)'
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
'default': 'off',
@ -962,10 +951,6 @@ def PostprocessOptions(options, os):
if 'msvcltcg' in ARGUMENTS:
print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
options['msvcltcg'] = 'on'
if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
options['armvariant'] = 'arm'
if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
options['armvariant'] = 'none'
if options['arch'] == 'mips':
if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
# Print a warning if native regexp is specified for mips

deps/v8/benchmarks/README.txt (8)

@ -61,3 +61,11 @@ Removed duplicate line in random seed code, and changed the name of
the Object.prototype.inherits function in the DeltaBlue benchmark to
inheritsFrom to avoid name clashes when running in Chromium with
extensions enabled.
Changes from Version 5 to Version 6
===================================
Removed dead code from the RayTrace benchmark and changed the Splay
benchmark to avoid converting the same numeric key to a string over
and over again.

deps/v8/benchmarks/base.js (2)

@ -78,7 +78,7 @@ BenchmarkSuite.suites = [];
// Scores are not comparable across versions. Bump the version if
// you're making changes that will affect that scores, e.g. if you add
// a new benchmark or change an existing one.
BenchmarkSuite.version = '5';
BenchmarkSuite.version = '6 (candidate)';
// To make the benchmark results predictable, we replace Math.random

deps/v8/benchmarks/raytrace.js (31)

@ -205,12 +205,6 @@ Flog.RayTracer.Light.prototype = {
this.intensity = (intensity ? intensity : 10.0);
},
getIntensity: function(distance){
if(distance >= intensity) return 0;
return Math.pow((intensity - distance) / strength, 0.2);
},
toString : function () {
return 'Light [' + this.position.x + ',' + this.position.y + ',' + this.position.z + ']';
}
@ -420,31 +414,6 @@ if(typeof(Flog) == 'undefined') var Flog = {};
if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {};
if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {};
Flog.RayTracer.Shape.BaseShape = Class.create();
Flog.RayTracer.Shape.BaseShape.prototype = {
position: null,
material: null,
initialize : function() {
this.position = new Vector(0,0,0);
this.material = new Flog.RayTracer.Material.SolidMaterial(
new Flog.RayTracer.Color(1,0,1),
0,
0,
0
);
},
toString : function () {
return 'Material [gloss=' + this.gloss + ', transparency=' + this.transparency + ', hasTexture=' + this.hasTexture +']';
}
}
/* Fake a Flog.* namespace */
if(typeof(Flog) == 'undefined') var Flog = {};
if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {};
if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {};
Flog.RayTracer.Shape.Sphere = Class.create();
Flog.RayTracer.Shape.Sphere.prototype = {

deps/v8/benchmarks/revisions.html (7)

@ -20,6 +20,13 @@ the benchmark suite.
</p>
<div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.html">link</a>)</h3></div>
<p>Removed dead code from the RayTrace benchmark and changed the Splay
benchmark to avoid converting the same numeric key to a string over
and over again.
</p>
<div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.html">link</a>)</h3></div>
<p>Removed duplicate line in random seed code, and changed the name of

deps/v8/benchmarks/run.html (6)

@ -111,12 +111,12 @@ higher scores means better performance: <em>Bigger is better!</em>
<li><b>Richards</b><br>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
<li><b>DeltaBlue</b><br>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
<li><b>Crypto</b><br>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>904 lines</i>).</li>
<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4684 lines</i>).</li>
<li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>).
</li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>379 lines</i>).</li>
</ul>
<p>

deps/v8/benchmarks/splay.js (11)

@ -46,16 +46,16 @@ var kSplayTreePayloadDepth = 5;
var splayTree = null;
function GeneratePayloadTree(depth, key) {
function GeneratePayloadTree(depth, tag) {
if (depth == 0) {
return {
array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
string : 'String for key ' + key + ' in leaf node'
string : 'String for key ' + tag + ' in leaf node'
};
} else {
return {
left: GeneratePayloadTree(depth - 1, key),
right: GeneratePayloadTree(depth - 1, key)
left: GeneratePayloadTree(depth - 1, tag),
right: GeneratePayloadTree(depth - 1, tag)
};
}
}
@ -74,7 +74,8 @@ function InsertNewNode() {
do {
key = GenerateKey();
} while (splayTree.find(key) != null);
splayTree.insert(key, GeneratePayloadTree(kSplayTreePayloadDepth, key));
var payload = GeneratePayloadTree(kSplayTreePayloadDepth, String(key));
splayTree.insert(key, payload);
return key;
}

deps/v8/include/v8-debug.h (48)

@ -143,6 +143,39 @@ class EXPORT Debug {
};
/**
* An event details object passed to the debug event listener.
*/
class EventDetails {
public:
/**
* Event type.
*/
virtual DebugEvent GetEvent() const = 0;
/**
* Access to execution state and event data of the debug event. Don't store
* these cross callbacks as their content becomes invalid.
*/
virtual Handle<Object> GetExecutionState() const = 0;
virtual Handle<Object> GetEventData() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding callback when it was registered.
*/
virtual Handle<Value> GetCallbackData() const = 0;
virtual ~EventDetails() {}
};
/**
* Debug event callback function.
*
@ -157,6 +190,15 @@ class EXPORT Debug {
Handle<Object> event_data,
Handle<Value> data);
/**
* Debug event callback function.
*
* \param event_details object providing information about the debug event
*
* An EventCallback2 does not take possession of the event data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*EventCallback2)(const EventDetails& event_details);
/**
* Debug message callback function.
@ -165,7 +207,7 @@ class EXPORT Debug {
* \param length length of the message
* \param client_data the data value passed when registering the message handler
* A MessageHandler does not take posession of the message string,
* A MessageHandler does not take possession of the message string,
* and must not rely on the data persisting after the handler returns.
*
* This message handler is deprecated. Use MessageHandler2 instead.
@ -178,7 +220,7 @@ class EXPORT Debug {
*
* \param message the debug message handler message object
* A MessageHandler does not take posession of the message data,
* A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler2)(const Message& message);
@ -196,6 +238,8 @@ class EXPORT Debug {
// Set a C debug event listener.
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
static bool SetDebugEventListener2(EventCallback2 that,
Handle<Value> data = Handle<Value>());
// Set a JavaScript debug event listener.
static bool SetDebugEventListener(v8::Handle<v8::Object> that,
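A sketch of how an embedder might use the EventDetails/EventCallback2 interface added above. This is not from the commit; it assumes a build with ENABLE_DEBUGGER_SUPPORT, and the printf reporting is purely illustrative.

#include <v8.h>
#include <v8-debug.h>
#include <cstdio>

// The new-style callback receives everything through one EventDetails
// object instead of separate arguments.
static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  v8::DebugEvent type = details.GetEvent();
  // These handles must not be stored across callbacks; their content is
  // only valid while the handler runs.
  v8::Handle<v8::Object> exec_state = details.GetExecutionState();
  v8::Handle<v8::Object> event_data = details.GetEventData();
  v8::Handle<v8::Value> client_data = details.GetCallbackData();
  std::printf("debug event %d (exec_state %s, event_data %s, client_data %s)\n",
              static_cast<int>(type),
              exec_state.IsEmpty() ? "empty" : "set",
              event_data.IsEmpty() ? "empty" : "set",
              client_data.IsEmpty() ? "empty" : "set");
}

void InstallDebugListener() {
  v8::HandleScope scope;
  // The optional data handle is returned to the callback through
  // EventDetails::GetCallbackData().
  v8::Debug::SetDebugEventListener2(OnDebugEvent,
                                    v8::String::New("client-data"));
}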

deps/v8/include/v8-profiler.h (25)

@ -139,6 +139,15 @@ class V8EXPORT CpuProfile {
*/
class V8EXPORT CpuProfiler {
public:
/**
* A note on security tokens usage. As scripts from different
* origins can run inside a single V8 instance, it is possible to
* have functions from different security contexts intermixed in a
* single CPU profile. To avoid exposing function names belonging to
* other contexts, filtering by security token is performed while
* obtaining profiling results.
*/
/**
* Returns the number of profiles collected (doesn't include
* profiles that are being collected at the moment of call.)
@ -146,16 +155,22 @@ class V8EXPORT CpuProfiler {
static int GetProfilesCount();
/** Returns a profile by index. */
static const CpuProfile* GetProfile(int index);
static const CpuProfile* GetProfile(
int index,
Handle<Value> security_token = Handle<Value>());
/** Returns a profile by uid. */
static const CpuProfile* FindProfile(unsigned uid);
static const CpuProfile* FindProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>());
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
* once. Attempts to start collecting several profiles with the same
* title are silently ignored.
* title are silently ignored. While collecting a profile, functions
* from all security contexts are included in it. The token-based
* filtering is only performed when querying for a profile.
*/
static void StartProfiling(Handle<String> title);
@ -163,7 +178,9 @@ class V8EXPORT CpuProfiler {
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
static const CpuProfile* StopProfiling(Handle<String> title);
static const CpuProfile* StopProfiling(
Handle<String> title,
Handle<Value> security_token = Handle<Value>());
};
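A sketch of how the token-filtered profiling API above might be used. Not part of the commit; it assumes a build with ENABLE_LOGGING_AND_PROFILING and that the embedder has already chosen a security token value for the context.

#include <v8.h>
#include <v8-profiler.h>

const v8::CpuProfile* ProfileWithToken(v8::Handle<v8::Value> security_token) {
  v8::HandleScope scope;
  v8::Handle<v8::String> title = v8::String::New("page-load");

  // Collection records functions from every security context...
  v8::CpuProfiler::StartProfiling(title);
  // ... run the JavaScript of interest here ...

  // ...and filtering by token only happens when the profile is queried.
  const v8::CpuProfile* profile =
      v8::CpuProfiler::StopProfiling(title, security_token);

  // Previously collected profiles can be enumerated with the same filter.
  for (int i = 0; i < v8::CpuProfiler::GetProfilesCount(); i++) {
    const v8::CpuProfile* p = v8::CpuProfiler::GetProfile(i, security_token);
    (void)p;  // Inspect the filtered call tree of p here.
  }
  return profile;
}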

deps/v8/samples/shell.cc (5)

@ -299,5 +299,10 @@ void ReportException(v8::TryCatch* try_catch) {
printf("^");
}
printf("\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
printf("%s\n", stack_trace_string);
}
}
}
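The shell change above exercises the new C++ stack trace API mentioned in the ChangeLog (v8::TryCatch::StackTrace()). Below is a condensed sketch of the same pattern, not from the commit, assuming a context is already entered.

#include <v8.h>
#include <cstdio>

void CompileRunAndReport(const char* source) {
  v8::HandleScope scope;
  v8::TryCatch try_catch;
  v8::Handle<v8::Script> script =
      v8::Script::Compile(v8::String::New(source));
  if (!script.IsEmpty()) {
    script->Run();
  }
  if (try_catch.HasCaught()) {
    // StackTrace() returns the stack trace captured for the exception,
    // if one is available; it may be empty.
    v8::String::Utf8Value stack_trace(try_catch.StackTrace());
    if (stack_trace.length() > 0) {
      std::printf("%s\n", *stack_trace);
    }
  }
}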

deps/v8/src/SConscript (5)

@ -136,13 +136,8 @@ SOURCES = {
arm/register-allocator-arm.cc
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
"""),
'armvariant:arm': Split("""
arm/assembler-arm.cc
"""),
'armvariant:thumb2': Split("""
arm/assembler-thumb2.cc
"""),
'arch:mips': Split("""
fast-codegen.cc
mips/assembler-mips.cc

deps/v8/src/api.cc (55)

@ -48,7 +48,7 @@
#define LOG_API(expr) LOG(ApiEntryCall(expr))
#ifdef ENABLE_HEAP_PROTECTION
#ifdef ENABLE_VMSTATE_TRACKING
#define ENTER_V8 i::VMState __state__(i::OTHER)
#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
#else
@ -3992,10 +3992,40 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
// --- D e b u g S u p p o r t ---
#ifdef ENABLE_DEBUGGER_SUPPORT
static v8::Debug::EventCallback event_callback = NULL;
static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
if (event_callback) {
event_callback(event_details.GetEvent(),
event_details.GetExecutionState(),
event_details.GetEventData(),
event_details.GetCallbackData());
}
}
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
EnsureInitialized("v8::Debug::SetDebugEventListener()");
ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
ENTER_V8;
event_callback = that;
HandleScope scope;
i::Handle<i::Object> proxy = i::Factory::undefined_value();
if (that != NULL) {
proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
}
i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
return true;
}
bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
EnsureInitialized("v8::Debug::SetDebugEventListener2()");
ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false);
ENTER_V8;
HandleScope scope;
i::Handle<i::Object> proxy = i::Factory::undefined_value();
if (that != NULL) {
@ -4250,15 +4280,23 @@ int CpuProfiler::GetProfilesCount() {
}
const CpuProfile* CpuProfiler::GetProfile(int index) {
const CpuProfile* CpuProfiler::GetProfile(int index,
Handle<Value> security_token) {
IsDeadCheck("v8::CpuProfiler::GetProfile");
return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::GetProfile(index));
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::GetProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
index));
}
const CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
Handle<Value> security_token) {
IsDeadCheck("v8::CpuProfiler::FindProfile");
return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::FindProfile(uid));
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::FindProfile(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
uid));
}
@ -4268,10 +4306,13 @@ void CpuProfiler::StartProfiling(Handle<String> title) {
}
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
Handle<Value> security_token) {
IsDeadCheck("v8::CpuProfiler::StopProfiling");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
i::CpuProfiler::StopProfiling(
security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
*Utils::OpenHandle(*title)));
}
#endif // ENABLE_LOGGING_AND_PROFILING

deps/v8/src/arm/assembler-arm-inl.h (7)

@ -169,13 +169,6 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());

deps/v8/src/arm/assembler-arm.cc (321)

@ -36,6 +36,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "arm/assembler-arm-inl.h"
#include "serialize.h"
@ -106,6 +108,15 @@ void CpuFeatures::Probe() {
const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on ARM means that it is a movw/movt instruction. We don't
// generate those yet.
return false;
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
Instr* pc = reinterpret_cast<Instr*>(pc_);
@ -268,6 +279,20 @@ const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
static const Instr kLdrRegFpOffsetPattern =
al | B26 | L | Offset | fp.code() * B16;
static const Instr kStrRegFpOffsetPattern =
al | B26 | Offset | fp.code() * B16;
static const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
static const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | fp.code() * B16;
static const Instr kLdrStrInstrTypeMask = 0xffff0000;
static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
static const Instr kLdrStrOffsetMask = 0x00000fff;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
@ -395,6 +420,43 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
}
Register Assembler::GetRd(Instr instr) {
Register reg;
reg.code_ = ((instr & kRdMask) >> kRdShift);
return reg;
}
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
bool Assembler::IsPop(Instr instr) {
return ((instr & ~kRdMask) == kPopRegPattern);
}
bool Assembler::IsStrRegFpOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}
bool Assembler::IsLdrRegFpOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}
bool Assembler::IsStrRegFpNegOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}
bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@ -887,15 +949,12 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
// str(src, MemOperand(sp, 4, NegPreIndex), al);
// add(sp, sp, Operand(kPointerSize));
// Both instructions can be eliminated.
int pattern_size = 2 * kInstrSize;
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
}
}
@ -1086,20 +1145,170 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
}
addrmod2(cond | B26 | L, dst, src);
// Eliminate pattern: push(r), pop(r)
// str(r, MemOperand(sp, 4, NegPreIndex), al)
// ldr(r, MemOperand(sp, 4, PostIndex), al)
// Both instructions can be eliminated.
int pattern_size = 2 * kInstrSize;
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
pc_ -= 2 * kInstrSize;
if (FLAG_print_push_pop_elimination) {
PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
// Eliminate pattern: push(ry), pop(rx)
// str(ry, MemOperand(sp, 4, NegPreIndex), al)
// ldr(rx, MemOperand(sp, 4, PostIndex), al)
// Both instructions can be eliminated if ry = rx.
// If ry != rx, a register copy from ry to rx is inserted
// after eliminating the push and the pop instructions.
Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (can_peephole_optimize(2) &&
IsPush(push_instr) &&
IsPop(pop_instr)) {
if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
Register reg_pushed, reg_popped;
reg_pushed = GetRd(push_instr);
reg_popped = GetRd(pop_instr);
pc_ -= 2 * kInstrSize;
// Insert a mov instruction, which is better than a pair of push & pop
mov(reg_popped, reg_pushed);
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
}
} else {
// For consecutive push and pop on the same register,
// both the push and the pop can be deleted.
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
}
}
}
if (can_peephole_optimize(2)) {
Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
if ((IsStrRegFpOffset(str_instr) &&
IsLdrRegFpOffset(ldr_instr)) ||
(IsStrRegFpNegOffset(str_instr) &&
IsLdrRegFpNegOffset(ldr_instr))) {
if ((ldr_instr & kLdrStrInstrArgumentMask) ==
(str_instr & kLdrStrInstrArgumentMask)) {
// Pattern: Ldr/str same fp+offset, same register.
//
// The following:
// str rx, [fp, #-12]
// ldr rx, [fp, #-12]
//
// Becomes:
// str rx, [fp, #-12]
pc_ -= 1 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
}
} else if ((ldr_instr & kLdrStrOffsetMask) ==
(str_instr & kLdrStrOffsetMask)) {
// Pattern: Ldr/str same fp+offset, different register.
//
// The following:
// str rx, [fp, #-12]
// ldr ry, [fp, #-12]
//
// Becomes:
// str rx, [fp, #-12]
// mov ry, rx
Register reg_stored, reg_loaded;
reg_stored = GetRd(str_instr);
reg_loaded = GetRd(ldr_instr);
pc_ -= 1 * kInstrSize;
// Insert a mov instruction, which is better than ldr.
mov(reg_loaded, reg_stored);
if (FLAG_print_peephole_optimization) {
PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
}
}
}
}
if (can_peephole_optimize(3)) {
Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(mem_write_instr) &&
IsPop(mem_read_instr)) {
if ((IsLdrRegFpOffset(ldr_instr) ||
IsLdrRegFpNegOffset(ldr_instr))) {
if ((mem_write_instr & kRdMask) ==
(mem_read_instr & kRdMask)) {
// Pattern: push & pop from/to same register,
// with a fp+offset ldr in between
//
// The following:
// str rx, [sp, #-4]!
// ldr rz, [fp, #-24]
// ldr rx, [sp], #+4
//
// Becomes:
// if(rx == rz)
// delete all
// else
// ldr rz, [fp, #-24]
if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
pc_ -= 3 * kInstrSize;
} else {
pc_ -= 3 * kInstrSize;
// Reinsert back the ldr rz.
emit(ldr_instr);
}
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
}
} else {
// Pattern: push & pop from/to different registers
// with a fp+offset ldr in between
//
// The following:
// str rx, [sp, #-4]!
// ldr rz, [fp, #-24]
// ldr ry, [sp], #+4
//
// Becomes:
// if(ry == rz)
// mov ry, rx;
// else if(rx != rz)
// ldr rz, [fp, #-24]
// mov ry, rx
// else if((ry != rz) || (rx == rz)) becomes:
// mov ry, rx
// ldr rz, [fp, #-24]
Register reg_pushed, reg_popped;
if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
} else if ((mem_write_instr & kRdMask)
!= (ldr_instr & kRdMask)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
emit(ldr_instr);
mov(reg_popped, reg_pushed);
} else if (((mem_read_instr & kRdMask)
!= (ldr_instr & kRdMask)) ||
((mem_write_instr & kRdMask)
== (ldr_instr & kRdMask)) ) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
emit(ldr_instr);
}
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
}
}
}
}
}
}
@ -1111,16 +1320,13 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
// Eliminate pattern: pop(), push(r)
// add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
// -> str r, [sp, 0], al
int pattern_size = 2 * kInstrSize;
if (FLAG_push_pop_elimination &&
last_bound_pos_ <= (pc_offset() - pattern_size) &&
reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
}
}
@ -1162,12 +1368,18 @@ void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B4, dst, src);
#else
ldr(dst, src, cond);
// Generate two ldr instructions if ldrd is not available.
MemOperand src1(src);
src1.set_offset(src1.offset() + 4);
Register dst1(dst);
dst1.code_ = dst1.code_ + 1;
ldr(dst1, src1, cond);
dst1.set_code(dst1.code() + 1);
if (dst.is(src.rn())) {
ldr(dst1, src1, cond);
ldr(dst, src, cond);
} else {
ldr(dst, src, cond);
ldr(dst1, src1, cond);
}
#endif
}
@ -1177,11 +1389,12 @@ void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
#else
str(src, dst, cond);
// Generate two str instructions if strd is not available.
MemOperand dst1(dst);
dst1.set_offset(dst1.offset() + 4);
Register src1(src);
src1.code_ = src1.code_ + 1;
src1.set_code(src1.code() + 1);
str(src, dst, cond);
str(src1, dst1, cond);
#endif
}
@ -1216,26 +1429,6 @@ void Assembler::stm(BlockAddrMode am,
}
// Semaphore instructions.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
ASSERT(!dst.is(base) && !src.is(base));
emit(cond | P | base.code()*B16 | dst.code()*B12 |
B7 | B4 | src.code());
}
void Assembler::swpb(Register dst,
Register src,
Register base,
Condition cond) {
ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
ASSERT(!dst.is(base) && !src.is(base));
emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
B7 | B4 | src.code());
}
// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#ifndef __arm__
@ -1779,34 +1972,6 @@ void Assembler::nop(int type) {
}
void Assembler::lea(Register dst,
const MemOperand& x,
SBit s,
Condition cond) {
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
if ((am & P) == 0) // post indexing
mov(dst, Operand(x.rn_), s, cond);
else if ((am & U) == 0) // negative indexing
sub(dst, x.rn_, Operand(x.offset_), s, cond);
else
add(dst, x.rn_, Operand(x.offset_), s, cond);
} else {
// Register offset (shift_imm_ and shift_op_ are 0) or scaled
// register offset the constructors make sure than both shift_imm_
// and shift_op_ are initialized.
ASSERT(!x.rm_.is(pc));
if ((am & P) == 0) // post indexing
mov(dst, Operand(x.rn_), s, cond);
else if ((am & U) == 0) // negative indexing
sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
else
add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
}
}
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
@ -2062,3 +2227,5 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

deps/v8/src/arm/assembler-arm.h (29)

@ -80,6 +80,11 @@ struct Register {
return 1 << code_;
}
void set_code(int code) {
code_ = code;
ASSERT(is_valid());
}
// Unfortunately we can't make this private in a struct.
int code_;
};
@ -458,7 +463,8 @@ class MemOperand BASE_EMBEDDED {
return offset_;
}
Register rm() const {return rm_;}
Register rn() const { return rn_; }
Register rm() const { return rm_; }
private:
Register rn_; // base
@ -774,10 +780,6 @@ class Assembler : public Malloced {
void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Semaphore instructions
void swp(Register dst, Register src, Register base, Condition cond = al);
void swpb(Register dst, Register src, Register base, Condition cond = al);
// Exception-generating instructions and debugging support
void stop(const char* msg);
@ -924,10 +926,6 @@ class Assembler : public Malloced {
add(sp, sp, Operand(kPointerSize));
}
// Load effective address of memory operand x into register dst
void lea(Register dst, const MemOperand& x,
SBit s = LeaveCC, Condition cond = al);
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
@ -976,6 +974,12 @@ class Assembler : public Malloced {
int current_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
bool can_peephole_optimize(int instructions) {
if (!FLAG_peephole_optimization) return false;
if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
}
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
@ -987,6 +991,13 @@ class Assembler : public Malloced {
static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
static bool IsStrRegFpOffset(Instr instr);
static bool IsLdrRegFpOffset(Instr instr);
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
protected:

deps/v8/src/arm/assembler-thumb2-inl.h (263)

@ -1,263 +0,0 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
#include "arm/assembler-thumb2.h"
#include "cpu.h"
namespace v8 {
namespace internal {
Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
return static_cast<Condition>(cc ^ ne);
}
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // relocate entry
}
// We do not use pc relative addressing on ARM, so there is
// nothing else to do.
}
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return Assembler::target_address_at(pc_);
}
Address RelocInfo::target_address_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
}
void RelocInfo::set_target_address(Address target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
}
Object* RelocInfo::target_object() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_at(Assembler::target_address_address_at(pc_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
Object** RelocInfo::target_object_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
}
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
}
Address RelocInfo::call_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}
Object* RelocInfo::call_object() {
return *call_object_address();
}
Object** RelocInfo::call_object_address() {
ASSERT(IsPatchedReturnSequence());
// The 2 instructions offset assumes patched return sequence.
ASSERT(IsJSReturn(rmode()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_object(Object* target) {
*call_object_address() = target;
}
bool RelocInfo::IsPatchedReturnSequence() {
// On ARM a "call instruction" is actually two instructions.
// mov lr, pc
// ldr pc, [pc, #XXX]
return (Assembler::instr_at(pc_) == kMovLrPc)
&& ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
== kLdrPCPattern);
}
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
rmode_ = rmode;
}
Operand::Operand(const char* s) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE;
}
Operand::Operand(Register rm) {
rm_ = rm;
rs_ = no_reg;
shift_op_ = LSL;
shift_imm_ = 0;
}
bool Operand::is_reg() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
if (pc_offset() >= next_buffer_check_) {
CheckConstPool(false, true);
}
}
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
}
Address Assembler::target_address_address_at(Address pc) {
Address target_pc = pc;
Instr instr = Memory::int32_at(target_pc);
// If we have a bx instruction, the instruction before the bx is
// what we need to patch.
static const int32_t kBxInstMask = 0x0ffffff0;
static const int32_t kBxInstPattern = 0x012fff10;
if ((instr & kBxInstMask) == kBxInstPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
// Verify that the instruction to patch is a
// ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
return target_pc + offset + 8;
}
Address Assembler::target_address_at(Address pc) {
return Memory::Address_at(target_address_address_at(pc));
}
void Assembler::set_target_at(Address constant_pool_entry,
Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::set_target_address_at(Address pc, Address target) {
Memory::Address_at(target_address_address_at(pc)) = target;
// Intuitively, we would think it is necessary to flush the instruction cache
// after patching a target address in the code as follows:
// CPU::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction was actually patched by the assignment
// above; the target address is not part of an instruction, it is patched in
// the constant pool and is read via a data access; the instruction accessing
// this address in the constant pool remains unchanged.
}
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_THUMB2_INL_H_

deps/v8/src/arm/assembler-thumb2.cc (1878)

File diff suppressed because it is too large

deps/v8/src/arm/assembler-thumb2.h (1036)

File diff suppressed because it is too large

deps/v8/src/arm/builtins-arm.cc (6)

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
@ -130,7 +132,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// of the JSArray.
// result: JSObject
// scratch2: start of next object
__ lea(scratch1, MemOperand(result, JSArray::kSize));
__ add(scratch1, result, Operand(JSArray::kSize));
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
@ -1311,3 +1313,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

deps/v8/src/arm/codegen-arm.cc (513)

@ -27,12 +27,15 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "jsregexp.h"
#include "jump-target-light-inl.h"
#include "parser.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
@ -40,10 +43,12 @@
#include "runtime.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
#include "virtual-frame-arm-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@ -274,7 +279,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_.SetExpectedHeight();
function_return_is_shadowed_ = false;
// Generate code to 'execute' declarations and initialize functions
@ -1143,44 +1148,66 @@ void CodeGenerator::SmiOperation(Token::Value op,
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
__ tst(tos, Operand(kSmiTagMask));
deferred->Branch(ne);
__ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // remove tags
uint32_t problematic_mask = kSmiTagMask;
// For unsigned shift by zero all negative smis are problematic.
if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
__ tst(tos, Operand(problematic_mask));
deferred->Branch(ne); // Go slow for problematic input.
switch (op) {
case Token::SHL: {
if (shift_value != 0) {
__ mov(scratch, Operand(scratch, LSL, shift_value));
int adjusted_shift = shift_value - kSmiTagSize;
ASSERT(adjusted_shift >= 0);
if (adjusted_shift != 0) {
__ mov(scratch, Operand(tos, LSL, adjusted_shift));
// Check that the *signed* result fits in a smi.
__ add(scratch2, scratch, Operand(0x40000000), SetCC);
deferred->Branch(mi);
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
} else {
// Check that the *signed* result fits in a smi.
__ add(scratch2, tos, Operand(0x40000000), SetCC);
deferred->Branch(mi);
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
}
}
// check that the *signed* result fits in a smi
__ add(scratch2, scratch, Operand(0x40000000), SetCC);
deferred->Branch(mi);
break;
}
case Token::SHR: {
// LSR by immediate 0 means shifting 32 bits.
if (shift_value != 0) {
__ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag.
// LSR by immediate 0 means shifting 32 bits.
__ mov(scratch, Operand(scratch, LSR, shift_value));
if (shift_value == 1) {
// check that the *unsigned* result fits in a smi
// neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging
// - 0x40000000: this number would convert to negative when
// smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi
__ tst(scratch, Operand(0xc0000000));
deferred->Branch(ne);
}
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
}
// check that the *unsigned* result fits in a smi
// neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging
// - 0x40000000: this number would convert to negative when
// smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi
__ tst(scratch, Operand(0xc0000000));
deferred->Branch(ne);
break;
}
case Token::SAR: {
// In the ARM instructions set, ASR by immediate 0 means shifting 32
// bits.
if (shift_value != 0) {
// ASR by immediate 0 means shifting 32 bits.
__ mov(scratch, Operand(scratch, ASR, shift_value));
// Do the shift and the tag removal in one operation. If the shift
// is 31 bits (the highest possible value) then we emit the
// instruction as a shift by 0 which means shift arithmetically by
// 32.
__ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
// Put tag back.
__ mov(tos, Operand(tos, LSL, kSmiTagSize));
}
break;
}
default: UNREACHABLE();
}
__ mov(tos, Operand(scratch, LSL, kSmiTagSize));
deferred->BindExit();
frame_->EmitPush(tos);
break;
@ -1343,6 +1370,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// give us a megamorphic load site. Not super, but it works.
LoadAndSpill(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame_->Dup();
frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
@ -1549,7 +1577,7 @@ void CodeGenerator::VisitBlock(Block* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->SetExpectedHeight();
VisitStatementsAndSpill(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@ -1836,7 +1864,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->SetExpectedHeight();
LoadAndSpill(node->tag());
@ -1925,7 +1953,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DoWhileStatement");
CodeForStatementPosition(node);
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->SetExpectedHeight();
JumpTarget body(JumpTarget::BIDIRECTIONAL);
IncrementLoopNesting();
@ -1935,14 +1963,14 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
ConditionAnalysis info = AnalyzeCondition(node->cond());
switch (info) {
case ALWAYS_TRUE:
node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->SetExpectedHeight();
node->continue_target()->Bind();
break;
case ALWAYS_FALSE:
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->continue_target()->SetExpectedHeight();
break;
case DONT_KNOW:
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->continue_target()->SetExpectedHeight();
body.Bind();
break;
}
@ -2006,12 +2034,12 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
ConditionAnalysis info = AnalyzeCondition(node->cond());
if (info == ALWAYS_FALSE) return;
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
// Label the top of the loop with the continue target for the backward
// CFG edge.
node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->SetExpectedHeight();
node->continue_target()->Bind();
if (info == DONT_KNOW) {
@ -2060,17 +2088,17 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
ConditionAnalysis info = AnalyzeCondition(node->cond());
if (info == ALWAYS_FALSE) return;
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->SetExpectedHeight();
IncrementLoopNesting();
// If there is no update statement, label the top of the loop with the
// continue target, otherwise with the loop target.
JumpTarget loop(JumpTarget::BIDIRECTIONAL);
if (node->next() == NULL) {
node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
node->continue_target()->SetExpectedHeight();
node->continue_target()->Bind();
} else {
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->continue_target()->SetExpectedHeight();
loop.Bind();
}
@ -2275,8 +2303,8 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// sp[4] : enumerable
// Grab the current frame's height for the break and continue
// targets only after all the state is pushed on the frame.
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
node->break_target()->SetExpectedHeight();
node->continue_target()->SetExpectedHeight();
// Load the current count to r0, load the length to r1.
__ ldrd(r0, frame_->ElementAt(0));
@ -2766,45 +2794,13 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
JumpTarget slow;
JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
// If there was no control flow to slow, we can exit early.
if (!slow.is_linked()) {
frame_->EmitPush(r0);
return;
}
frame_->SpillAll();
done.Jump();
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
frame_->SpillAll();
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
__ ldr(r0,
ContextSlotOperandCheckExtensions(potential_slot,
r1,
r2,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
// There is always control flow to slow from
// ContextSlotOperandCheckExtensions so we have to jump around
// it.
done.Jump();
}
}
// Generate fast case for loading from slots that correspond to
// local/global variables or arguments unless they are shadowed by
// eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(slot,
typeof_state,
&slow,
&done);
slow.Bind();
VirtualFrame::SpilledScope spilled_scope(frame_);
@ -3014,8 +3010,67 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
typeof_state == INSIDE_TYPEOF
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT);
// Drop the global object. The result is in r0.
frame_->Drop();
}
void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow,
JumpTarget* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
frame_->SpillAll();
done->Jump();
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
frame_->SpillAll();
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
__ ldr(r0,
ContextSlotOperandCheckExtensions(potential_slot,
r1,
r2,
slow));
if (potential_slot->var()->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
done->Jump();
} else if (rewrite != NULL) {
// Generate fast case for argument loads.
Property* property = rewrite->AsProperty();
if (property != NULL) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
Literal* key_literal = property->key()->AsLiteral();
if (obj_proxy != NULL &&
key_literal != NULL &&
obj_proxy->IsArguments() &&
key_literal->handle()->IsSmi()) {
// Load arguments object if there are no eval-introduced
// variables. Then load the argument from the arguments
// object using keyed load.
__ ldr(r0,
ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
r1,
r2,
slow));
frame_->EmitPush(r0);
__ mov(r1, Operand(key_literal->handle()));
frame_->EmitPush(r1);
EmitKeyedLoad();
done->Jump();
}
}
}
}
}
@ -3368,7 +3423,6 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame_->Dup();
}
EmitNamedLoad(name, var != NULL);
frame_->Drop(); // Receiver is left on the stack.
frame_->EmitPush(r0);
// Perform the binary operation.
@ -3507,9 +3561,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
// Perform the assignment. It is safe to ignore constants here.
ASSERT(node->op() != Token::INIT_CONST);
CodeForSourcePosition(node->position());
frame_->PopToR0();
EmitKeyedStore(prop->key()->type());
frame_->Drop(2); // Key and receiver are left on the stack.
frame_->EmitPush(r0);
// Stack layout:
@ -3705,52 +3757,26 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// JavaScript examples:
//
// with (obj) foo(1, 2, 3) // foo is in obj
// with (obj) foo(1, 2, 3) // foo may be in obj.
//
// function f() {};
// function g() {
// eval(...);
// f(); // f could be in extension object
// f(); // f could be in extension object.
// }
// ----------------------------------
// JumpTargets do not yet support merging frames so the frame must be
// spilled when jumping to these targets.
JumpTarget slow;
JumpTarget done;
JumpTarget slow, done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
LoadFromGlobalSlotCheckExtensions(var->slot(), NOT_INSIDE_TYPEOF, &slow);
frame_->EmitPush(r0);
LoadGlobalReceiver(r1);
done.Jump();
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = var->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
__ ldr(r0,
ContextSlotOperandCheckExtensions(potential_slot,
r1,
r2,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
frame_->EmitPush(r0);
LoadGlobalReceiver(r1);
done.Jump();
}
}
// Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(var->slot(),
NOT_INSIDE_TYPEOF,
&slow,
&done);
slow.Bind();
// Load the function
@ -3764,7 +3790,18 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r0); // function
frame_->EmitPush(r1); // receiver
done.Bind();
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
JumpTarget call;
call.Jump();
done.Bind();
frame_->EmitPush(r0); // function
LoadGlobalReceiver(r1); // receiver
call.Bind();
}
// Call the function. At this point, everything is spilled but the
// function and receiver are in r0 and r1.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@ -4892,7 +4929,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@ -4901,10 +4937,8 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
// Postfix: Make room for the result.
if (is_postfix) {
__ mov(r0, Operand(0));
frame_->EmitPush(r0);
frame_->EmitPush(Operand(Smi::FromInt(0)));
}
// A constant reference is not saved to, so a constant reference is not a
@ -4914,35 +4948,33 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
if (!is_postfix) {
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
frame_->EmitPush(Operand(Smi::FromInt(0)));
}
ASSERT_EQ(original_height + 1, frame_->height());
return;
}
// This pushes 0, 1 or 2 words on the stack to be used later when updating
// the target. It also pushes the current value of the target.
target.GetValue();
frame_->EmitPop(r0);
JumpTarget slow;
JumpTarget exit;
// Load the value (1) into register r1.
__ mov(r1, Operand(Smi::FromInt(1)));
// Check for smi operand.
__ tst(r0, Operand(kSmiTagMask));
Register value = frame_->PopToRegister();
__ tst(value, Operand(kSmiTagMask));
slow.Branch(ne);
// Postfix: Store the old value as the result.
if (is_postfix) {
__ str(r0, frame_->ElementAt(target.size()));
frame_->SetElementAt(value, target.size());
}
// Perform optimistic increment/decrement.
if (is_increment) {
__ add(r0, r0, Operand(r1), SetCC);
__ add(value, value, Operand(Smi::FromInt(1)), SetCC);
} else {
__ sub(r0, r0, Operand(r1), SetCC);
__ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
}
// If the increment/decrement didn't overflow, we're done.
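// A minimal standalone sketch, assuming the 32-bit smi encoding used here
// (kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 0); helper names are
// illustrative, not V8's. Tagging is just a left shift by one, so adding the
// tagged constant for 1 increments the untagged value directly, and SetCC
// lets the overflow check that follows revert the optimistic operation and
// fall into the slow path.
static inline int TagSmi(int value) { return value << 1; }  // low tag bit is 0
static inline int UntagSmi(int smi) { return smi >> 1; }
static inline int OptimisticTaggedIncrement(int smi) {
  return smi + TagSmi(1);  // == TagSmi(UntagSmi(smi) + 1) when no overflow
}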
@ -4950,41 +4982,50 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Revert optimistic increment/decrement.
if (is_increment) {
__ sub(r0, r0, Operand(r1));
__ sub(value, value, Operand(Smi::FromInt(1)));
} else {
__ add(r0, r0, Operand(r1));
__ add(value, value, Operand(Smi::FromInt(1)));
}
// Slow case: Convert to number.
// Slow case: Convert to number. At this point the
// value to be incremented is in the value register.
slow.Bind();
// Convert the operand to a number.
frame_->EmitPush(value);
{
// Convert the operand to a number.
frame_->EmitPush(r0);
VirtualFrame::SpilledScope spilled(frame_);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
}
if (is_postfix) {
// Postfix: store to result (on the stack).
__ str(r0, frame_->ElementAt(target.size()));
}
// Compute the new value.
__ mov(r1, Operand(Smi::FromInt(1)));
frame_->EmitPush(r0);
frame_->EmitPush(r1);
if (is_increment) {
frame_->CallRuntime(Runtime::kNumberAdd, 2);
} else {
frame_->CallRuntime(Runtime::kNumberSub, 2);
if (is_postfix) {
// Postfix: store to result (on the stack).
__ str(r0, frame_->ElementAt(target.size()));
}
// Compute the new value.
frame_->EmitPush(r0);
frame_->EmitPush(Operand(Smi::FromInt(1)));
if (is_increment) {
frame_->CallRuntime(Runtime::kNumberAdd, 2);
} else {
frame_->CallRuntime(Runtime::kNumberSub, 2);
}
}
__ Move(value, r0);
// Store the new value in the target if not const.
// At this point the answer is in the value register.
exit.Bind();
frame_->EmitPush(r0);
frame_->EmitPush(value);
// Set the target with the result, leaving the result on
// top of the stack. Removes the target from the stack if
// it has a non-zero size.
if (!is_const) target.SetValue(NOT_CONST_INIT);
}
// Postfix: Discard the new value and use the old.
if (is_postfix) frame_->EmitPop(r0);
if (is_postfix) frame_->Pop();
ASSERT_EQ(original_height + 1, frame_->height());
}
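// Note on the postfix bookkeeping above: the Smi::FromInt(0) pushed up front
// reserves a stack slot beneath the reference (target.size() deep) into which
// the old value is stored before the increment; the final frame_->Pop() then
// discards the new value, leaving that old value on top of the stack as the
// result of the postfix expression.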
@ -5387,26 +5428,30 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
explicit DeferredReferenceGetNamedValue(Register receiver,
Handle<String> name)
: receiver_(receiver), name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
private:
Register receiver_;
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::Generate() {
ASSERT(receiver_.is(r0) || receiver_.is(r1));
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
// Setup the registers and call load IC.
// On entry to this deferred code, r0 is assumed to already contain the
// receiver from the top of the stack.
// Ensure receiver in r0 and name in r2 to match load ic calling convention.
__ Move(r0, receiver_);
__ mov(r2, Operand(name_));
// The rest of the instructions in the deferred code must be together.
@ -5427,20 +5472,34 @@ void DeferredReferenceGetNamedValue::Generate() {
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
DeferredReferenceGetKeyedValue() {
DeferredReferenceGetKeyedValue(Register key, Register receiver)
: key_(key), receiver_(receiver) {
set_comment("[ DeferredReferenceGetKeyedValue");
}
virtual void Generate();
private:
Register key_;
Register receiver_;
};
void DeferredReferenceGetKeyedValue::Generate() {
ASSERT((key_.is(r0) && receiver_.is(r1)) ||
(key_.is(r1) && receiver_.is(r0)));
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
// Ensure key in r0 and receiver in r1 to match keyed load ic calling
// convention.
if (key_.is(r1)) {
__ Swap(r0, r1, ip);
}
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has the arguments key and receiver in r0 and r1.
@ -5460,11 +5519,19 @@ void DeferredReferenceGetKeyedValue::Generate() {
class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
DeferredReferenceSetKeyedValue() {
DeferredReferenceSetKeyedValue(Register value,
Register key,
Register receiver)
: value_(value), key_(key), receiver_(receiver) {
set_comment("[ DeferredReferenceSetKeyedValue");
}
virtual void Generate();
private:
Register value_;
Register key_;
Register receiver_;
};
@ -5475,10 +5542,17 @@ void DeferredReferenceSetKeyedValue::Generate() {
__ IncrementCounter(
&Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
// Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
// calling convention.
if (value_.is(r1)) {
__ Swap(r0, r1, ip);
}
ASSERT(receiver_.is(r2));
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has receiver and key on the stack and the value to
// store in r0.
// Call keyed store IC. It has the arguments value, key and receiver in r0,
// r1 and r2.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
@ -5516,10 +5590,11 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// this code
// Load the receiver from the stack.
frame_->SpillAllButCopyTOSToR0();
Register receiver = frame_->PopToRegister();
VirtualFrame::SpilledScope spilled(frame_);
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(name);
new DeferredReferenceGetNamedValue(receiver, name);
#ifdef DEBUG
int kInlinedNamedLoadInstructions = 7;
@ -5529,19 +5604,19 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Check that the receiver is a heap object.
__ tst(r0, Operand(kSmiTagMask));
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(r0, 0));
__ ldr(r0, MemOperand(receiver, 0));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
@ -5576,15 +5651,14 @@ void CodeGenerator::EmitKeyedLoad() {
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
// Load the key and receiver from the stack to r0 and r1.
frame_->PopToR1R0();
Register receiver = r0;
Register key = r1;
// Load the key and receiver from the stack.
Register key = frame_->PopToRegister();
Register receiver = frame_->PopToRegister(key);
VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects key and receiver in r0 and r1.
// The deferred code expects key and receiver in registers.
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue();
new DeferredReferenceGetKeyedValue(key, receiver);
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
@ -5594,17 +5668,16 @@ void CodeGenerator::EmitKeyedLoad() {
// property code which can be patched. Therefore the exact number of
// instructions generated needs to be fixed, so the constant pool is blocked
// while generating this code.
#ifdef DEBUG
int kInlinedKeyedLoadInstructions = 19;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
#ifdef DEBUG
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
@ -5632,17 +5705,15 @@ void CodeGenerator::EmitKeyedLoad() {
__ add(scratch1,
scratch1,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0,
__ ldr(scratch1,
MemOperand(scratch1, key, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
__ cmp(r0, scratch2);
// This is the only branch to deferred where r0 and r1 do not contain the
// receiver and key. We can't just load undefined here because we have to
// check the prototype.
__ cmp(scratch1, scratch2);
deferred->Branch(eq);
__ mov(r0, scratch1);
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedKeyedLoadInstructions,
ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
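// A minimal standalone sketch of the scaled-index addressing above, assuming
// kPointerSizeLog2 == 2, kSmiTagSize == 1 and kSmiShiftSize == 0 as on ARM
// here (helper name is illustrative): a tagged smi key is already the value
// shifted left by one, so shifting it left by only one more bit yields the
// byte offset of a pointer-sized element.
static inline int KeyedElementByteOffset(int tagged_smi_key) {
  const int kPointerSizeLog2 = 2;  // 4-byte elements
  const int kSmiTagSize = 1;       // tagged smi == value << 1
  return tagged_smi_key << (kPointerSizeLog2 - kSmiTagSize);  // == value * 4
}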
@ -5652,78 +5723,86 @@ void CodeGenerator::EmitKeyedLoad() {
void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
VirtualFrame::SpilledScope scope(frame_);
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
// Inline the keyed store.
Comment cmnt(masm_, "[ Inlined store to keyed property");
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue();
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
Register scratch3 = r3;
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::keyed_store_inline, 1,
frame_->scratch0(), frame_->scratch1());
scratch1, scratch2);
// Load the value, key and receiver from the stack.
Register value = frame_->PopToRegister();
Register key = frame_->PopToRegister(value);
Register receiver = r2;
frame_->EmitPop(receiver);
VirtualFrame::SpilledScope spilled(frame_);
// The deferred code expects value, key and receiver in registers.
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(value, key, receiver);
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
__ tst(r0, Operand(kSmiTagMask));
__ tst(value, Operand(kSmiTagMask));
deferred->Branch(ne);
// Load the key and receiver from the stack.
__ ldr(r1, MemOperand(sp, 0));
__ ldr(r2, MemOperand(sp, kPointerSize));
// Check that the key is a smi.
__ tst(r1, Operand(kSmiTagMask));
__ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
// Check that the receiver is a heap object.
__ tst(r2, Operand(kSmiTagMask));
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check that the receiver is a JSArray.
__ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
__ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
deferred->Branch(ne);
// Check that the key is within bounds. Both the key and the length of
// the JSArray are smis. Use unsigned comparison to handle negative keys.
__ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
__ cmp(r3, r1);
__ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ cmp(scratch1, key);
deferred->Branch(ls); // Unsigned less equal.
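// A minimal standalone sketch of why a single unsigned compare on the tagged
// values covers both checks (assuming 32-bit smis tagged as value << 1;
// helper name is illustrative): tagging preserves order for non-negative
// values, and a negative key has its top bit set, so it compares as a huge
// unsigned number and fails the bounds check as well.
static inline bool KeyedStoreKeyInBounds(int tagged_key, int tagged_length) {
  return static_cast<unsigned>(tagged_length) > static_cast<unsigned>(tagged_key);
}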
// The following instructions are part of the inlined store keyed
// property code which can be patched. Therefore the exact number of
// instructions generated needs to be fixed, so the constant pool is blocked
// while generating this code.
#ifdef DEBUG
int kInlinedKeyedStoreInstructions = 7;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
// Read the fixed array map from the constant pool (not from the root
// array) so that the value can be patched. When debugging, we patch this
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
__ mov(r5, Operand(Factory::fixed_array_map()));
__ cmp(r4, r5);
#ifdef DEBUG
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
__ mov(scratch3, Operand(Factory::fixed_array_map()));
__ cmp(scratch2, scratch3);
deferred->Branch(ne);
// Store the value.
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r0, MemOperand(r3, r1, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
__ add(scratch1, scratch1,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(value,
MemOperand(scratch1, key, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedKeyedStoreInstructions,
ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
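// A minimal standalone sketch of the store policy above (assuming the usual
// kSmiTagMask == 1; helper name is illustrative): only smis are stored by the
// inlined code because a smi is not a heap pointer, so RecordWrite can be
// skipped; any value with the tag bit set is handled by the IC via the
// deferred code instead.
static inline bool CanSkipWriteBarrier(int tagged_value) {
  const int kSmiTagMask = 1;
  return (tagged_value & kSmiTagMask) == 0;  // smi => no pointer to record
}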
@ -5786,19 +5865,20 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
if (persist_after_get_) {
cgen_->frame()->Dup();
}
cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
if (!persist_after_get_) set_unloaded();
break;
}
case KEYED: {
ASSERT(property != NULL);
if (persist_after_get_) {
cgen_->frame()->Dup2();
}
ASSERT(property != NULL);
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
if (!persist_after_get_) set_unloaded();
@ -5839,16 +5919,13 @@ void Reference::SetValue(InitState init_state) {
}
case KEYED: {
VirtualFrame::SpilledScope scope(frame);
Comment cmnt(masm, "[ Store to keyed Property");
Property* property = expression_->AsProperty();
ASSERT(property != NULL);
cgen_->CodeForSourcePosition(property->position());
frame->EmitPop(r0); // Value.
cgen_->EmitKeyedStore(property->key()->type());
frame->EmitPush(r0);
cgen_->UnloadReference(this);
set_unloaded();
break;
}
@ -8486,9 +8563,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if the regexp entry in generated code is turned off by a runtime
// switch or at compilation.
#ifndef V8_NATIVE_REGEXP
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_NATIVE_REGEXP
#else // V8_INTERPRETED_REGEXP
if (!FLAG_regexp_entry_native) {
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
return;
@ -8598,7 +8675,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ LoadRoot(ip, kFixedArrayMapRootIndex);
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r0, ip);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
@ -8821,7 +8898,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif // V8_NATIVE_REGEXP
#endif // V8_INTERPRETED_REGEXP
}
@ -9967,3 +10044,5 @@ void StringAddStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

16
deps/v8/src/arm/codegen-arm.h

@ -29,6 +29,7 @@
#define V8_ARM_CODEGEN_ARM_H_
#include "ic-inl.h"
#include "ast.h"
namespace v8 {
namespace internal {
@ -36,6 +37,7 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
class JumpTarget;
class RegisterAllocator;
class RegisterFile;
@ -217,6 +219,10 @@ class CodeGenerator: public AstVisitor {
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
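// These counts are what KeyedLoadIC::PatchInlinedLoad and
// KeyedStoreIC::PatchInlinedStore (see ic-arm.cc in this change) step back
// from the end of the inlined sequence to locate the patchable map load:
//   ldr_map_instr_address =
//       inline_end_address -
//       kInlinedKeyedLoadInstructionsAfterPatch * Assembler::kInstrSize;
// which is why the constant pool is blocked while that code is emitted.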
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@ -309,6 +315,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
@ -338,6 +345,15 @@ class CodeGenerator: public AstVisitor {
TypeofState typeof_state,
JumpTarget* slow);
// Support for loading from local/global variables and arguments
// whose location is known unless they are shadowed by
// eval-introduced bindings. Generates no code for unsupported slot
// types and therefore expects to fall through to the slow jump target.
void EmitDynamicLoadFromSlotFastCase(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow,
JumpTarget* done);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for

4
deps/v8/src/arm/constants-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "constants-arm.h"
@ -128,3 +130,5 @@ int Registers::Number(const char* name) {
} } // namespace assembler::arm
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/cpu-arm.cc

@ -32,6 +32,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "cpu.h"
#include "macro-assembler.h"
@ -136,3 +138,5 @@ void CPU::DebugBreak() {
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

11
deps/v8/src/arm/debug-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "debug.h"
@ -170,10 +172,11 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
Generate_DebugBreakCallHelper(masm, 0);
Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
}
@ -237,3 +240,5 @@ const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/disasm-arm.cc

@ -56,6 +56,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "constants-arm.h"
#include "disasm.h"
#include "macro-assembler.h"
@ -1356,3 +1358,5 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
} // namespace disasm
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/fast-codegen-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "scopes.h"
@ -236,3 +238,5 @@ void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

8
deps/v8/src/arm/frames-arm.cc

@ -27,12 +27,10 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "frames-inl.h"
#ifdef V8_ARM_VARIANT_THUMB
#include "arm/assembler-thumb2-inl.h"
#else
#include "arm/assembler-arm-inl.h"
#endif
namespace v8 {
@ -121,3 +119,5 @@ Address InternalFrame::GetCallerStackPointer() const {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

170
deps/v8/src/arm/full-codegen-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
@ -397,10 +399,10 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kValue: {
Label done;
__ bind(materialize_true);
__ mov(result_register(), Operand(Factory::true_value()));
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(materialize_false);
__ mov(result_register(), Operand(Factory::false_value()));
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ bind(&done);
switch (location_) {
case kAccumulator:
@ -417,7 +419,7 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kValueTest:
__ bind(materialize_true);
__ mov(result_register(), Operand(Factory::true_value()));
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
switch (location_) {
case kAccumulator:
break;
@ -430,7 +432,7 @@ void FullCodeGenerator::Apply(Expression::Context context,
case Expression::kTestValue:
__ bind(materialize_false);
__ mov(result_register(), Operand(Factory::false_value()));
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
switch (location_) {
case kAccumulator:
break;
@ -640,11 +642,11 @@ void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ pop(r1); // Key.
__ pop(r2); // Receiver.
__ Call(ic, RelocInfo::CODE_TARGET);
// Value in r0 is ignored (declarations are statements). Receiver
// and key on stack are discarded.
__ Drop(2);
// Value in r0 is ignored (declarations are statements).
}
}
}
@ -661,19 +663,29 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
// Build the shared function info and instantiate the function based
// on it.
Handle<SharedFunctionInfo> function_info =
Compiler::BuildFunctionInfo(expr, script(), this);
if (HasStackOverflow()) return;
// Create a new closure.
__ mov(r0, Operand(function_info));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && info->num_literals() == 0) {
FastNewClosureStub stub;
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
}
Apply(context_, r0);
}
@ -695,13 +707,12 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
if (var->is_global() && !var->is_this()) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
// object (receiver) in r0.
__ ldr(r0, CodeGenerator::GlobalObject());
__ push(r0);
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
DropAndApply(1, context, r0);
Apply(context, r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Comment cmnt(masm_, "Lookup slot");
@ -904,7 +915,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
ASSERT(expr->op() != Token::INIT_CONST);
// Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
// on the left-hand side.
if (!expr->target()->IsValidLeftHandSide()) {
VisitForEffect(expr->target());
return;
}
// Left-hand side can only be a property, a global or a (parameter or local)
// slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@ -984,6 +1001,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(),
context_);
break;
case NAMED_PROPERTY:
@ -1000,7 +1018,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
__ ldr(r0, MemOperand(sp, 0));
// Call load IC. It has arguments receiver and property name in r0 and r2.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}
@ -1024,14 +1042,13 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op,
Expression::Context context) {
// Three main cases: global variables, lookup slots, and all other
// types of slots. Left-hand-side parameters that rewrite to
// explicit property accesses do not reach here.
// Left-hand sides that rewrite to explicit property accesses do not reach
// here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
Slot* slot = var->slot();
if (var->is_global()) {
ASSERT(!var->is_this());
// Assignment to a global variable. Use inline caching for the
@ -1042,43 +1059,61 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
__ push(result_register()); // Value.
__ mov(r1, Operand(var->name()));
__ stm(db_w, sp, cp.bit() | r1.bit()); // Context and name.
__ CallRuntime(Runtime::kStoreContextSlot, 3);
} else if (var->slot() != NULL) {
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
// of const variables. Const assignments are simply skipped.
Label done;
Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER:
case Slot::LOCAL:
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ ldr(r1, MemOperand(fp, SlotOffset(slot)));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &done);
}
// Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ ldr(r1, target);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &done);
}
// Perform the assignment and issue the write barrier.
__ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ mov(r2, Operand(offset));
__ RecordWrite(r1, r2, r3);
break;
}
case Slot::LOOKUP:
UNREACHABLE();
// Call the runtime for the assignment. The runtime will ignore
// const reinitialization.
__ push(r0); // Value.
__ mov(r0, Operand(slot->var()->name()));
__ Push(cp, r0); // Context and name.
if (op == Token::INIT_CONST) {
// The runtime will ignore const redeclaration.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
__ CallRuntime(Runtime::kStoreContextSlot, 3);
}
break;
}
} else {
// Variables rewritten as properties are not treated as variables in
// assignments.
UNREACHABLE();
__ bind(&done);
}
Apply(context, result_register());
}
@ -1103,6 +1138,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
// Load receiver to r1. Leave a copy on the stack if needed for turning the
// receiver into fast case.
if (expr->ends_initialization_block()) {
__ ldr(r1, MemOperand(sp));
} else {
@ -1115,7 +1152,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(r0); // Result of assignment, saved even if not needed.
__ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value.
// Receiver is under the result value.
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
@ -1143,21 +1181,30 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ pop(r1); // Key.
// Load receiver to r2. Leave a copy on the stack if needed for turning the
// receiver into fast case.
if (expr->ends_initialization_block()) {
__ ldr(r2, MemOperand(sp));
} else {
__ pop(r2);
}
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
__ push(r0); // Result of assignment, saved even if not needed.
// Receiver is under the key and value.
__ ldr(ip, MemOperand(sp, 2 * kPointerSize));
// Receiver is under the result value.
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
DropAndApply(1, context_, r0);
} else {
Apply(context_, r0);
}
// Receiver and key are still on stack.
DropAndApply(2, context_, r0);
}
@ -1165,14 +1212,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
// Evaluate receiver.
VisitForValue(expr->obj(), kStack);
if (key->IsPropertyName()) {
VisitForValue(expr->obj(), kAccumulator);
EmitNamedPropertyLoad(expr);
// Drop receiver left on the stack by IC.
DropAndApply(1, context_, r0);
Apply(context_, r0);
} else {
VisitForValue(expr->obj(), kStack);
VisitForValue(expr->key(), kAccumulator);
__ pop(r1);
EmitKeyedPropertyLoad(expr);
@ -1445,13 +1490,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
__ ldr(r0, CodeGenerator::GlobalObject());
__ push(r0);
__ mov(r2, Operand(proxy->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
__ str(r0, MemOperand(sp));
__ push(r0);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
proxy->var()->slot()->type() == Slot::LOOKUP) {
@ -1557,10 +1601,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(ip, Operand(Smi::FromInt(0)));
__ push(ip);
}
VisitForValue(prop->obj(), kStack);
if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the accumulator.
VisitForValue(prop->obj(), kAccumulator);
__ push(r0);
EmitNamedPropertyLoad(prop);
} else {
VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kAccumulator);
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
@ -1631,6 +1678,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case VARIABLE:
if (expr->is_postfix()) {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN,
Expression::kEffect);
// For all contexts except kEffect: We have the result on
// top of the stack.
@ -1639,6 +1687,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN,
context_);
}
break;
@ -1657,15 +1706,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
__ pop(r1); // Key.
__ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
if (expr->is_postfix()) {
__ Drop(2); // Result is on the stack under the key and the receiver.
if (context_ != Expression::kEffect) {
ApplyTOS(context_);
}
} else {
DropAndApply(2, context_, r0);
Apply(context_, r0);
}
break;
}
@ -1877,3 +1927,5 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

401
deps/v8/src/arm/ic-arm.cc

@ -27,7 +27,10 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "assembler-arm.h"
#include "codegen.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
@ -639,7 +642,9 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address - 18 * Assembler::kInstrSize;
inline_end_address -
(CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
@ -669,7 +674,9 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address - 5 * Assembler::kInstrSize;
inline_end_address -
(CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
@ -1204,13 +1211,13 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
// -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ Push(r3, r2, r0);
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
@ -1220,12 +1227,13 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
// -----------------------------------
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
__ Push(r3, r1, r0);
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
@ -1234,147 +1242,135 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
// -----------------------------------
Label slow, fast, array, extra, exit, check_pixel_array;
Label slow, fast, array, extra, check_pixel_array;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
Register elements = r3; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
__ tst(r1, Operand(kSmiTagMask));
__ tst(key, Operand(kSmiTagMask));
__ b(ne, &slow);
// Check that the object isn't a smi.
__ tst(r3, Operand(kSmiTagMask));
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, &slow);
// Get the map of the object.
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset));
__ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &slow);
// Check if the object is a JS array or not.
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_ARRAY_TYPE));
// r1 == key.
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JS object.
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
// Object case: Check key against length in the elements array.
__ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r2, ip);
__ cmp(r4, ip);
__ b(ne, &check_pixel_array);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
__ mov(r4, Operand(key, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
__ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ cmp(r1, Operand(ip));
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ b(lo, &fast);
// Slow case:
// Slow case, handle jump to runtime.
__ bind(&slow);
// Entry registers are intact.
// r0: value.
// r1: key.
// r2: receiver.
GenerateRuntimeSetProperty(masm);
// Check whether the elements array is a pixel array.
// r0: value
// r1: index (as a smi), zero-extended.
// r3: elements array
// r4: elements map.
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r2, ip);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ BranchOnNotSmi(r0, &slow);
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
__ cmp(r1, Operand(ip));
__ BranchOnNotSmi(value, &slow);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
__ mov(r4, r0); // Save the value.
__ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
{ // Clamp the value to [0..255].
Label done;
__ tst(r0, Operand(0xFFFFFF00));
__ tst(r5, Operand(0xFFFFFF00));
__ b(eq, &done);
__ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative.
__ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive.
__ mov(r5, Operand(0), LeaveCC, mi); // 0 if negative.
__ mov(r5, Operand(255), LeaveCC, pl); // 255 if positive.
__ bind(&done);
}
__ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
__ strb(r0, MemOperand(r2, r1));
__ mov(r0, Operand(r4)); // Return the original value.
// Get the pointer to the external array. This clobbers elements.
__ ldr(elements,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
__ strb(r5, MemOperand(elements, r4)); // Elements is now external array.
__ Ret();
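// A minimal standalone sketch of the clamp implemented above with the
// tst/conditional-mov sequence (helper name is illustrative): any value
// outside [0..255] has bits set in 0xFFFFFF00, and the sign then selects
// 0 (mi) or 255 (pl).
static inline int ClampToPixelValue(int value) {
  if ((value & 0xFFFFFF00) == 0) return value;  // already in [0..255]
  return value < 0 ? 0 : 255;
}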
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
// r0 == value, r1 == key, r2 == elements, r3 == object
__ bind(&extra);
__ b(ne, &slow); // do not leave holes in the array
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag
__ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
__ cmp(r1, Operand(ip));
// Condition code from comparing key and array length is still available.
__ b(ne, &slow); // Only support writing to array[array.length].
// Check for room in the elements backing store.
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
__ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag
__ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment
__ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
__ mov(r3, Operand(r2));
// NOTE: Computing the address to store into must take the fact
// that the key has been incremented into account.
int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
((1 << kSmiTagSize) * 2);
__ add(r2, r2, Operand(displacement));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
// Calculate key + 1 as smi.
ASSERT_EQ(0, kSmiTag);
__ add(r4, key, Operand(Smi::FromInt(1)));
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ b(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is, the
// length is always a smi.
// r0 == value, r3 == object
__ bind(&array);
__ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r1, ip);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ ldr(r1, MemOperand(sp)); // restore key
// r0 == value, r1 == key, r2 == elements, r3 == object.
__ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
__ cmp(r1, Operand(ip));
// Check the key against the length in the array.
__ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ cmp(key, Operand(ip));
__ b(hs, &extra);
__ mov(r3, Operand(r2));
__ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fall through to fast case.
// Fast case: Do the store.
// r0 == value, r2 == address to store into, r3 == elements
__ bind(&fast);
__ str(r0, MemOperand(r2));
// Fast case, store the value to the elements backing store.
__ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(r5));
// Skip write barrier if the written value is a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &exit);
__ tst(value, Operand(kSmiTagMask));
__ Ret(eq);
// Update write barrier for the elements array address.
__ sub(r1, r2, Operand(r3));
__ RecordWrite(r3, r1, r2);
__ sub(r4, r5, Operand(elements));
__ RecordWrite(elements, r4, r5);
__ bind(&exit);
__ Ret();
}
@ -1468,20 +1464,23 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- sp[0] : key
// -- sp[1] : receiver
// -----------------------------------
Label slow, check_heap_number;
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
__ BranchOnSmi(r2, &slow);
__ BranchOnSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3
__ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
__ b(le, &slow);
// Check that the receiver does not require access checks. We need
@ -1491,73 +1490,70 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(r1, &slow);
__ BranchOnNotSmi(key, &slow);
// Check that the elements array is the appropriate type of
// ExternalArray.
// r0: value
// r1: index (smi)
// r2: object
__ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
__ cmp(r3, ip);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the index is in range.
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
__ cmp(r1, ip);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
__ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
__ cmp(r4, ip);
// Unsigned comparison catches both negative and too-large values.
__ b(hs, &slow);
// Handle both smis and HeapNumbers in the fast path. Go to the
// runtime for all other kinds of values.
// r0: value
// r1: index (integer)
// r2: array
__ BranchOnNotSmi(r0, &check_heap_number);
__ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
// r1: index (integer)
// r2: base pointer of external storage
// r3: value (integer)
// r3: external array.
// r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
case kExternalFloatArray:
// Need to perform int-to-float conversion.
ConvertIntToFloat(masm, r3, r4, r5, r6);
__ str(r4, MemOperand(r2, r1, LSL, 2));
ConvertIntToFloat(masm, r5, r6, r7, r9);
__ str(r6, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
break;
}
// r0: value
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
// r0: value
// r1: index (integer)
// r2: external array object
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
__ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
__ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
__ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
// r3: base pointer of external storage.
// r4: key (integer).
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
@ -1567,13 +1563,13 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vldr(d0, r3, HeapNumber::kValueOffset);
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
__ vcvt_f32_f64(s0, d0);
__ vmov(r3, s0);
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ vmov(r5, s0);
__ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
Label done;
@ -1582,38 +1578,38 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
__ vcmp(d0, d0);
// Move vector status bits to normal status bits.
__ vmrs(v8::internal::pc);
__ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0
__ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
__ b(vs, &done);
// Test whether exponent equal to 0x7FF (infinity or NaN)
__ vmov(r4, r3, d0);
// Test whether exponent equal to 0x7FF (infinity or NaN).
__ vmov(r6, r7, d0);
__ mov(r5, Operand(0x7FF00000));
__ and_(r3, r3, Operand(r5));
__ teq(r3, Operand(r5));
__ mov(r3, Operand(0), LeaveCC, eq);
__ and_(r6, r6, Operand(r5));
__ teq(r6, Operand(r5));
__ mov(r6, Operand(0), LeaveCC, eq);
// Not infinity or NaN: simply convert to int
// Not infinity or NaN: simply convert to int.
if (IsElementTypeSigned(array_type)) {
__ vcvt_s32_f64(s0, d0, ne);
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
__ vmov(r3, s0, ne);
__ vmov(r5, s0, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
@ -1621,12 +1617,12 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
}
// r0: original value
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
} else {
// VFP3 is not available, do manual conversions
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
// VFP3 is not available, do manual conversions.
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
if (array_type == kExternalFloatArray) {
Label done, nan_or_infinity_or_zero;
@ -1638,106 +1634,108 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r5, Operand(HeapNumber::kExponentMask));
__ and_(r6, r3, Operand(r5), SetCC);
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ b(eq, &nan_or_infinity_or_zero);
__ teq(r6, Operand(r5));
__ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ teq(r9, Operand(r7));
__ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
__ b(eq, &nan_or_infinity_or_zero);
// Rebias exponent.
__ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
__ add(r6,
r6,
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ add(r9,
r9,
Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
__ cmp(r6, Operand(kBinary32MaxExponent));
__ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ cmp(r9, Operand(kBinary32MaxExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
__ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
__ b(gt, &done);
__ cmp(r6, Operand(kBinary32MinExponent));
__ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ cmp(r9, Operand(kBinary32MinExponent));
__ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
__ b(lt, &done);
__ and_(r7, r3, Operand(HeapNumber::kSignMask));
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
__ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
__ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
__ bind(&done);
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ str(r5, MemOperand(r3, r4, LSL, 2));
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
__ bind(&nan_or_infinity_or_zero);
__ and_(r7, r3, Operand(HeapNumber::kSignMask));
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r6, r6, r7);
__ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
__ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
__ and_(r7, r5, Operand(HeapNumber::kSignMask));
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r9, r9, r7);
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
} else {
bool is_signed_type = IsElementTypeSigned(array_type);
bool is_signed_type = IsElementTypeSigned(array_type);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
Label done, sign;
// Test for all special exponent values: zeros, subnormal numbers, NaNs
// and infinities. All these should be converted to 0.
__ mov(r5, Operand(HeapNumber::kExponentMask));
__ and_(r6, r3, Operand(r5), SetCC);
__ mov(r3, Operand(0), LeaveCC, eq);
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
__ mov(r5, Operand(0), LeaveCC, eq);
__ b(eq, &done);
__ teq(r6, Operand(r5));
__ mov(r3, Operand(0), LeaveCC, eq);
__ teq(r9, Operand(r7));
__ mov(r5, Operand(0), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
__ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative then result is 0.
__ mov(r3, Operand(0), LeaveCC, mi);
__ mov(r5, Operand(0), LeaveCC, mi);
__ b(mi, &done);
// If exponent is too big then result is minimal value
__ cmp(r6, Operand(meaningfull_bits - 1));
__ mov(r3, Operand(min_value), LeaveCC, ge);
// If exponent is too big then result is minimal value.
__ cmp(r9, Operand(meaningfull_bits - 1));
__ mov(r5, Operand(min_value), LeaveCC, ge);
__ b(ge, &done);
__ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
__ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
__ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
__ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
__ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
__ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
__ rsb(r6, r6, Operand(0));
__ mov(r3, Operand(r3, LSL, r6));
__ rsb(r6, r6, Operand(meaningfull_bits));
__ orr(r3, r3, Operand(r4, LSR, r6));
__ rsb(r9, r9, Operand(0));
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
__ teq(r5, Operand(0));
__ rsb(r3, r3, Operand(0), LeaveCC, ne);
__ teq(r7, Operand(0));
__ rsb(r5, r5, Operand(0), LeaveCC, ne);
__ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
__ strb(r3, MemOperand(r2, r1, LSL, 0));
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ strh(r3, MemOperand(r2, r1, LSL, 1));
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
case kExternalIntArray:
case kExternalUnsignedIntArray:
__ str(r3, MemOperand(r2, r1, LSL, 2));
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
default:
UNREACHABLE();
@ -1748,6 +1746,11 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
// Slow case: call runtime.
__ bind(&slow);
// Entry registers are intact.
// r0: value
// r1: key
// r2: receiver
GenerateRuntimeSetProperty(masm);
}
@ -1838,3 +1841,5 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
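The kExternalFloatArray branch near the top of this hunk narrows a HeapNumber (IEEE binary64) to binary32 by rebiasing its exponent, then branching to ±Infinity on overflow or a signed zero on underflow. As a quick sanity check of that arithmetic, here is a small JavaScript sketch of my own (the values 1023 and 127 are the standard IEEE biases; the names only mirror the C++ constants):

// Sketch only: the exponent rebias performed above when storing a double
// into an external float array. Biased binary64 exponents are shifted by
// (kBinary32ExponentBias - HeapNumber::kExponentBias) = 127 - 1023 = -896.
var kExponentBias64 = 1023;  // IEEE double bias (HeapNumber::kExponentBias)
var kExponentBias32 = 127;   // IEEE single bias (kBinary32ExponentBias)

function rebias(biasedExponent64) {
  return biasedExponent64 + (kExponentBias32 - kExponentBias64);
}

rebias(1023);        // 127: the value 1.0 keeps an unbiased exponent of 0
rebias(1023 + 200);  // 327: exceeds the binary32 range, so the code above
                     //      writes sign | kBinary32ExponentMask (+/-Infinity)
rebias(1023 - 200);  // -73: below the binary32 range, so only the sign bit
                     //      survives (a signed zero)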

132
deps/v8/src/arm/jump-target-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
@ -47,28 +49,15 @@ void JumpTarget::DoJump() {
// which are still live in the C++ code.
ASSERT(cgen()->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is already a frame expectation at the target.
ASSERT(direction_ == BIDIRECTIONAL);
cgen()->frame()->MergeTo(entry_frame_);
if (entry_frame_set_) {
// There is already a frame expectation at the target.
cgen()->frame()->MergeTo(&entry_frame_);
cgen()->DeleteFrame();
} else {
// Use the current frame as the expected one at the target if necessary.
if (entry_frame_ == NULL) {
entry_frame_ = cgen()->frame();
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
} else {
cgen()->frame()->MergeTo(entry_frame_);
cgen()->DeleteFrame();
}
// The predicate is_linked() should be made true. Its implementation
// detects the presence of a frame pointer in the reaching_frames_ list.
if (!is_linked()) {
reaching_frames_.Add(NULL);
ASSERT(is_linked());
}
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
}
__ jmp(&entry_label_);
}
@ -77,23 +66,19 @@ void JumpTarget::DoJump() {
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (is_bound()) {
ASSERT(direction_ == BIDIRECTIONAL);
if (entry_frame_set_) {
// Backward branch. We have an expected frame to merge to on the
// backward edge.
cgen()->frame()->MergeTo(entry_frame_);
} else {
// Clone the current frame to use as the expected one at the target if
// necessary.
if (entry_frame_ == NULL) {
entry_frame_ = new VirtualFrame(cgen()->frame());
}
// The predicate is_linked() should be made true. Its implementation
// detects the presence of a frame pointer in the reaching_frames_ list.
if (!is_linked()) {
reaching_frames_.Add(NULL);
ASSERT(is_linked());
if (cc == al) {
cgen()->frame()->MergeTo(&entry_frame_);
} else {
// We can't do conditional merges yet so you have to ensure that all
// conditional branches to the JumpTarget have the same virtual frame.
ASSERT(cgen()->frame()->Equals(&entry_frame_));
}
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
__ b(cc, &entry_label_);
}
@ -113,15 +98,10 @@ void JumpTarget::Call() {
// Calls are always 'forward' so we use a copy of the current frame (plus
// one for a return address) as the expected frame.
ASSERT(entry_frame_ == NULL);
VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
target_frame->Adjust(1);
entry_frame_ = target_frame;
// The predicate is_linked() should now be made true. Its implementation
// detects the presence of a frame pointer in the reaching_frames_ list.
reaching_frames_.Add(NULL);
ASSERT(is_linked());
ASSERT(!entry_frame_set_);
VirtualFrame target_frame = *cgen()->frame();
target_frame.Adjust(1);
set_entry_frame(&target_frame);
__ bl(&entry_label_);
}
@ -136,77 +116,27 @@ void JumpTarget::DoBind() {
if (cgen()->has_valid_frame()) {
// If there is a current frame we can use it on the fall through.
if (entry_frame_ == NULL) {
entry_frame_ = new VirtualFrame(cgen()->frame());
if (!entry_frame_set_) {
entry_frame_ = *cgen()->frame();
entry_frame_set_ = true;
} else {
ASSERT(cgen()->frame()->Equals(entry_frame_));
cgen()->frame()->MergeTo(&entry_frame_);
}
} else {
// If there is no current frame we must have an entry frame which we can
// copy.
ASSERT(entry_frame_ != NULL);
ASSERT(entry_frame_set_);
RegisterFile empty;
cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
}
// The predicate is_linked() should be made false. Its implementation
// detects the presence (or absence) of frame pointers in the
// reaching_frames_ list. If we inserted a bogus frame to make
// is_linked() true, remove it now.
if (is_linked()) {
reaching_frames_.Clear();
cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
}
__ bind(&entry_label_);
}
void BreakTarget::Jump() {
// On ARM we do not currently emit merge code for jumps, so we need to do
// it explicitly here. The only merging necessary is to drop extra
// statement state from the stack.
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
cgen()->frame()->Drop(count);
DoJump();
}
void BreakTarget::Jump(Result* arg) {
UNIMPLEMENTED();
}
void BreakTarget::Bind() {
#ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the
// jumps to this target.
for (int i = 0; i < reaching_frames_.length(); i++) {
ASSERT(reaching_frames_[i] == NULL ||
reaching_frames_[i]->height() == expected_height_);
}
#endif
// Drop leftover statement state from the frame before merging, even
// on the fall through. This is so we can bind the return target
// with state on the frame.
if (cgen()->has_valid_frame()) {
int count = cgen()->frame()->height() - expected_height_;
// On ARM we do not currently emit merge code at binding sites, so we need
// to do it explicitly here. The only merging necessary is to drop extra
// statement state from the stack.
cgen()->frame()->Drop(count);
}
DoBind();
}
void BreakTarget::Bind(Result* arg) {
UNIMPLEMENTED();
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/macro-assembler-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
@ -1725,3 +1727,5 @@ void CodePatcher::Emit(Address addr) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

5
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "unicode.h"
#include "log.h"
#include "ast.h"
@ -1255,3 +1258,5 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/register-allocator-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
@ -57,3 +59,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

4
deps/v8/src/arm/simulator-arm.cc

@ -29,6 +29,8 @@
#include <cstdarg>
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "disasm.h"
#include "assembler.h"
#include "arm/constants-arm.h"
@ -2731,3 +2733,5 @@ uintptr_t Simulator::PopAddress() {
} } // namespace assembler::arm
#endif // __arm__
#endif // V8_TARGET_ARCH_ARM

52
deps/v8/src/arm/stub-cache-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
@ -506,8 +508,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
ASSERT(callback->getter() != NULL);
Label cleanup;
__ pop(scratch2);
__ Push(receiver, scratch2);
__ push(receiver);
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
lookup->holder(), scratch1,
@ -526,9 +527,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ TailCallExternalReference(ref, 5, 1);
__ bind(&cleanup);
__ pop(scratch1);
__ pop(scratch2);
__ push(scratch1);
}
}
@ -1618,15 +1617,11 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
// -- r2 : name
// -- r0 : receiver
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
// Load receiver.
__ ldr(r0, MemOperand(sp, 0));
// Check that receiver is not a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
@ -1663,14 +1658,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
int index,
String* name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ ldr(r0, MemOperand(sp, 0));
GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -1685,13 +1678,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ ldr(r0, MemOperand(sp, 0));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
callback, name, &miss, &failure);
@ -1710,14 +1702,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Object* value,
String* name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ ldr(r0, MemOperand(sp, 0));
GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -1731,14 +1721,12 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ ldr(r0, MemOperand(sp, 0));
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,
@ -1764,10 +1752,9 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
@ -1974,32 +1961,31 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r2 : name
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
__ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
// Check that the name has not changed.
__ cmp(r2, Operand(Handle<String>(name)));
__ cmp(r1, Operand(Handle<String>(name)));
__ b(ne, &miss);
// Load receiver from the stack.
__ ldr(r3, MemOperand(sp));
// r1 is used as scratch register, r3 and r2 might be clobbered.
// r3 is used as a scratch register. r1 and r2 keep their values if a jump to
// the miss label is generated.
GenerateStoreField(masm(),
object,
index,
transition,
r3, r2, r1,
r2, r1, r3,
&miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
__ mov(r2, Operand(Handle<String>(name))); // restore name register.
__ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@ -2153,3 +2139,5 @@ Object* ConstructStubCompiler::CompileConstructStub(
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

53
deps/v8/src/arm/virtual-frame-arm-inl.h

@ -0,0 +1,53 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
#define V8_VIRTUAL_FRAME_ARM_INL_H_
#include "assembler-arm.h"
#include "virtual-frame-arm.h"
namespace v8 {
namespace internal {
// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
// file if such a thing existed.
MemOperand VirtualFrame::ParameterAt(int index) {
// Index -1 corresponds to the receiver.
ASSERT(-1 <= index); // -1 is the receiver.
ASSERT(index <= parameter_count());
return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
}
// The receiver frame slot.
MemOperand VirtualFrame::Receiver() {
return ParameterAt(-1);
}
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_ARM_INL_H_

122
deps/v8/src/arm/virtual-frame-arm.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@ -72,8 +74,15 @@ void VirtualFrame::PopToR0() {
void VirtualFrame::MergeTo(VirtualFrame* expected) {
if (Equals(expected)) return;
MergeTOSTo(expected->top_of_stack_state_);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
VirtualFrame::TopOfStack expected_top_of_stack_state) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
@ -154,7 +163,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
UNREACHABLE();
#undef CASE_NUMBER
}
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
top_of_stack_state_ = expected_top_of_stack_state;
}
@ -300,7 +309,8 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
SpillAllButCopyTOSToR0();
PopToR0();
SpillAll();
__ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
@ -330,8 +340,10 @@ void VirtualFrame::CallKeyedLoadIC() {
void VirtualFrame::CallKeyedStoreIC() {
ASSERT(SpilledScope::is_spilled());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
PopToR1R0();
SpillAll();
EmitPop(r2);
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
@ -418,7 +430,7 @@ void VirtualFrame::Pop() {
void VirtualFrame::EmitPop(Register reg) {
ASSERT(!is_used(reg));
ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ pop(reg);
} else {
@ -498,36 +510,40 @@ Register VirtualFrame::Peek() {
void VirtualFrame::Dup() {
AssertIsNotSpilled();
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
top_of_stack_state_ = R0_TOS;
break;
case R0_TOS:
__ mov(r1, r0);
// r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ mov(r0, r1);
// r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_R1_TOS:
__ push(r1);
__ mov(r1, r0);
// r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ push(r0);
__ mov(r0, r1);
// r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
default:
UNREACHABLE();
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, 0));
__ push(ip);
} else {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
top_of_stack_state_ = R0_TOS;
break;
case R0_TOS:
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_R1_TOS:
__ push(r1);
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ push(r0);
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
default:
UNREACHABLE();
}
}
element_count_++;
}
@ -576,7 +592,6 @@ Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
AssertIsNotSpilled();
element_count_--;
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
@ -628,6 +643,39 @@ void VirtualFrame::EmitPush(Register reg) {
}
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
if (this_far_down == 0) {
Pop();
Register dest = GetTOSRegister();
if (dest.is(reg)) {
// We already popped one item off the top of the stack. If the only
// free register is the one we were asked to push then we have been
// asked to push a register that was already in use, which cannot
// happen. It therefore follows that there are two free TOS registers:
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
dest = dest.is(r0) ? r1 : r0;
}
__ mov(dest, reg);
EmitPush(dest);
} else if (this_far_down == 1) {
int virtual_elements = kVirtualElements[top_of_stack_state_];
if (virtual_elements < 2) {
__ str(reg, ElementAt(this_far_down));
} else {
ASSERT(virtual_elements == 2);
ASSERT(!reg.is(r0));
ASSERT(!reg.is(r1));
Register dest = kBottomRegister[top_of_stack_state_];
__ mov(dest, reg);
}
} else {
ASSERT(this_far_down >= 2);
ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
__ str(reg, ElementAt(this_far_down));
}
}
Register VirtualFrame::GetTOSRegister() {
if (SpilledScope::is_spilled()) return r0;
@ -710,3 +758,5 @@ void VirtualFrame::SpillAll() {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

106
deps/v8/src/arm/virtual-frame-arm.h

@ -29,11 +29,14 @@
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
#include "scopes.h"
namespace v8 {
namespace internal {
// This dummy class is only used to create invalid virtual frames.
extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
// -------------------------------------------------------------------------
// Virtual frames
//
@ -82,26 +85,8 @@ class VirtualFrame : public ZoneObject {
// is not spilled, ie. where register allocation occurs. Eventually
// when RegisterAllocationScope is ubiquitous it can be removed
// along with the (by then unused) SpilledScope class.
explicit RegisterAllocationScope(CodeGenerator* cgen)
: cgen_(cgen),
old_is_spilled_(SpilledScope::is_spilled_) {
SpilledScope::is_spilled_ = false;
if (old_is_spilled_) {
VirtualFrame* frame = cgen->frame();
if (frame != NULL) {
frame->AssertIsSpilled();
}
}
}
~RegisterAllocationScope() {
SpilledScope::is_spilled_ = old_is_spilled_;
if (old_is_spilled_) {
VirtualFrame* frame = cgen_->frame();
if (frame != NULL) {
frame->SpillAll();
}
}
}
inline explicit RegisterAllocationScope(CodeGenerator* cgen);
inline ~RegisterAllocationScope();
private:
CodeGenerator* cgen_;
@ -116,19 +101,20 @@ class VirtualFrame : public ZoneObject {
// Construct an initial virtual frame on entry to a JS function.
inline VirtualFrame();
// Construct an invalid virtual frame, used by JumpTargets.
inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
MacroAssembler* masm() { return cgen()->masm(); }
inline CodeGenerator* cgen();
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
int element_count() { return element_count_; }
// The height of the virtual expression stack.
int height() {
return element_count() - expression_base_index();
}
inline int height();
bool is_used(int num) {
switch (num) {
@ -160,10 +146,6 @@ class VirtualFrame : public ZoneObject {
}
}
bool is_used(Register reg) {
return is_used(RegisterAllocator::ToNumber(reg));
}
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
// emitted.
@ -247,16 +229,13 @@ class VirtualFrame : public ZoneObject {
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
AssertIsSpilled();
return MemOperand(sp, index * kPointerSize);
int adjusted_index = index - kVirtualElements[top_of_stack_state_];
ASSERT(adjusted_index >= 0);
return MemOperand(sp, adjusted_index * kPointerSize);
}
// A frame-allocated local as an assembly operand.
MemOperand LocalAt(int index) {
ASSERT(0 <= index);
ASSERT(index < local_count());
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
inline MemOperand LocalAt(int index);
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
@ -268,26 +247,17 @@ class VirtualFrame : public ZoneObject {
MemOperand Context() { return MemOperand(fp, kContextOffset); }
// A parameter as an assembly operand.
MemOperand ParameterAt(int index) {
// Index -1 corresponds to the receiver.
ASSERT(-1 <= index); // -1 is the receiver.
ASSERT(index <= parameter_count());
return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
}
inline MemOperand ParameterAt(int index);
// The receiver frame slot.
MemOperand Receiver() { return ParameterAt(-1); }
inline MemOperand Receiver();
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
if (arg_count != 0) Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
masm()->CallStub(stub);
}
inline void CallStub(CodeStub* stub, int arg_count);
// Call JS function from top of the stack with arguments
// taken from the stack.
@ -308,7 +278,8 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver is on the stack. Result is returned in r0.
// Call load IC. Receiver is on the stack and is consumed. Result is returned
// in r0.
void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
// Call store IC. If the load is contextual, value is found on top of the
@ -320,8 +291,8 @@ class VirtualFrame : public ZoneObject {
// Result is returned in r0.
void CallKeyedLoadIC();
// Call keyed store IC. Key and receiver are on the stack and the value is in
// r0. Result is returned in r0.
// Call keyed store IC. Value, key and receiver are on the stack. All three
// are consumed. Result is returned in r0.
void CallKeyedStoreIC();
// Call into an IC stub given the number of arguments it removes
@ -386,6 +357,12 @@ class VirtualFrame : public ZoneObject {
void EmitPush(MemOperand operand);
void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
// register then this turns into a mov, otherwise an str. Afterwards
// you can still use the register even if it is a register that can be
// used for TOS (r0 or r1).
void SetElementAt(Register reg, int this_far_down);
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
Register GetTOSRegister();
@ -449,13 +426,13 @@ class VirtualFrame : public ZoneObject {
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
int local_count() { return cgen()->scope()->num_stack_slots(); }
inline int parameter_count();
inline int local_count();
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
int frame_pointer() { return parameter_count() + 3; }
inline int frame_pointer();
// The index of the first parameter. The receiver lies below the first
// parameter.
@ -463,26 +440,22 @@ class VirtualFrame : public ZoneObject {
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
int context_index() { return frame_pointer() - 1; }
inline int context_index();
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
int function_index() { return frame_pointer() - 2; }
inline int function_index();
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
int local0_index() { return frame_pointer() + 2; }
inline int local0_index();
// The index of the base of the expression stack.
int expression_base_index() { return local0_index() + local_count(); }
inline int expression_base_index();
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
int fp_relative(int index) {
ASSERT(index < element_count());
ASSERT(frame_pointer() < element_count()); // FP is on the frame.
return (frame_pointer() - index) * kPointerSize;
}
inline int fp_relative(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
@ -494,10 +467,13 @@ class VirtualFrame : public ZoneObject {
// onto the physical stack and made free.
void EnsureOneFreeTOSRegister();
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
void MergeTOSTo(TopOfStack expected_state);
inline bool Equals(VirtualFrame* other);
friend class JumpTarget;
friend class DeferredCode;
};

3
deps/v8/src/assembler.cc

@ -424,8 +424,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc";
case RelocInfo::EMBEDDED_OBJECT:
return "embedded object";
case RelocInfo::EMBEDDED_STRING:
return "embedded string";
case RelocInfo::CONSTRUCT_CALL:
return "code target (js construct call)";
case RelocInfo::CODE_TARGET_CONTEXT:
@ -508,7 +506,6 @@ void RelocInfo::Verify() {
ASSERT(code->address() == HeapObject::cast(found)->address());
break;
}
case RelocInfo::EMBEDDED_STRING:
case RUNTIME_ENTRY:
case JS_RETURN:
case COMMENT:

8
deps/v8/src/assembler.h

@ -121,7 +121,6 @@ class RelocInfo BASE_EMBEDDED {
DEBUG_BREAK,
CODE_TARGET, // code target which is not any of the above.
EMBEDDED_OBJECT,
EMBEDDED_STRING,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
@ -137,7 +136,7 @@ class RelocInfo BASE_EMBEDDED {
NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
NONE, // never recorded
LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_STRING
LAST_GCED_ENUM = EMBEDDED_OBJECT
};
@ -185,6 +184,11 @@ class RelocInfo BASE_EMBEDDED {
// Apply a relocation by delta bytes
INLINE(void apply(intptr_t delta));
// Is the pointer this relocation info refers to coded like a plain pointer
// or is it strange in some way (eg relative or patched into a series of
// instructions).
bool IsCodedSpecially();
// Read/modify the code target in the branch/call instruction
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY

79
deps/v8/src/ast-inl.h

@ -0,0 +1,79 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "ast.h"
namespace v8 {
namespace internal {
BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
: labels_(labels), type_(type) {
ASSERT(labels == NULL || labels->length() > 0);
}
SwitchStatement::SwitchStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
tag_(NULL), cases_(NULL) {
}
IterationStatement::IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
}
Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
: BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
statements_(capacity),
is_initializer_block_(is_initializer_block) {
}
ForStatement::ForStatement(ZoneStringList* labels)
: IterationStatement(labels),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
peel_this_loop_(false) {
}
ForInStatement::ForInStatement(ZoneStringList* labels)
: IterationStatement(labels), each_(NULL), enumerable_(NULL) {
}
DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
: IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}
} } // namespace v8::internal

14
deps/v8/src/ast.cc

@ -32,6 +32,8 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
#include "ast-inl.h"
#include "jump-target-inl.h"
namespace v8 {
namespace internal {
@ -786,6 +788,13 @@ Block::Block(Block* other, ZoneList<Statement*>* statements)
}
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
may_have_function_literal_(true) {
}
ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
Expression* expression)
: Statement(other), expression_(expression) {}
@ -809,6 +818,11 @@ IterationStatement::IterationStatement(IterationStatement* other,
: BreakableStatement(other), body_(body) {}
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) {
}
ForStatement::ForStatement(ForStatement* other,
Statement* init,
Expression* cond,

42
deps/v8/src/ast.h

@ -351,10 +351,7 @@ class BreakableStatement: public Statement {
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
protected:
BreakableStatement(ZoneStringList* labels, Type type)
: labels_(labels), type_(type) {
ASSERT(labels == NULL || labels->length() > 0);
}
inline BreakableStatement(ZoneStringList* labels, Type type);
explicit BreakableStatement(BreakableStatement* other);
@ -367,10 +364,7 @@ class BreakableStatement: public Statement {
class Block: public BreakableStatement {
public:
Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
: BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
statements_(capacity),
is_initializer_block_(is_initializer_block) { }
inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
// Construct a clone initialized from the original block and
// a deep copy of all statements of the original block.
@ -437,8 +431,7 @@ class IterationStatement: public BreakableStatement {
BreakTarget* continue_target() { return &continue_target_; }
protected:
explicit IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
explicit inline IterationStatement(ZoneStringList* labels);
// Construct a clone initialized from original and
// a deep copy of the original body.
@ -456,9 +449,7 @@ class IterationStatement: public BreakableStatement {
class DoWhileStatement: public IterationStatement {
public:
explicit DoWhileStatement(ZoneStringList* labels)
: IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}
explicit inline DoWhileStatement(ZoneStringList* labels);
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
@ -482,11 +473,7 @@ class DoWhileStatement: public IterationStatement {
class WhileStatement: public IterationStatement {
public:
explicit WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
may_have_function_literal_(true) {
}
explicit WhileStatement(ZoneStringList* labels);
void Initialize(Expression* cond, Statement* body) {
IterationStatement::Initialize(body);
@ -511,14 +498,7 @@ class WhileStatement: public IterationStatement {
class ForStatement: public IterationStatement {
public:
explicit ForStatement(ZoneStringList* labels)
: IterationStatement(labels),
init_(NULL),
cond_(NULL),
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
peel_this_loop_(false) {}
explicit inline ForStatement(ZoneStringList* labels);
// Construct a for-statement initialized from another for-statement
// and deep copies of all parts of the original statement.
@ -574,8 +554,7 @@ class ForStatement: public IterationStatement {
class ForInStatement: public IterationStatement {
public:
explicit ForInStatement(ZoneStringList* labels)
: IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
explicit inline ForInStatement(ZoneStringList* labels);
void Initialize(Expression* each, Expression* enumerable, Statement* body) {
IterationStatement::Initialize(body);
@ -691,8 +670,7 @@ class WithExitStatement: public Statement {
class CaseClause: public ZoneObject {
public:
CaseClause(Expression* label, ZoneList<Statement*>* statements)
: label_(label), statements_(statements) { }
CaseClause(Expression* label, ZoneList<Statement*>* statements);
bool is_default() const { return label_ == NULL; }
Expression* label() const {
@ -711,9 +689,7 @@ class CaseClause: public ZoneObject {
class SwitchStatement: public BreakableStatement {
public:
explicit SwitchStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
tag_(NULL), cases_(NULL) { }
explicit inline SwitchStatement(ZoneStringList* labels);
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;

2
deps/v8/src/bootstrapper.cc

@ -1753,8 +1753,8 @@ Genesis::Genesis(Handle<Object> global_object,
CreateNewGlobals(global_template, global_object, &inner_global);
HookUpGlobalProxy(inner_global, global_proxy);
InitializeGlobal(inner_global, empty_function);
if (!InstallNatives()) return;
InstallJSFunctionResultCaches();
if (!InstallNatives()) return;
MakeFunctionInstancePrototypeWritable();

59
deps/v8/src/builtins.cc

@ -330,22 +330,19 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
}
static bool ArrayPrototypeHasNoElements() {
static bool ArrayPrototypeHasNoElements(Context* global_context,
JSObject* array_proto) {
// This method depends on the non-writability of Object and Array prototype
// fields.
Context* global_context = Top::context()->global_context();
// Array.prototype
JSObject* proto =
JSObject::cast(global_context->array_function()->prototype());
if (proto->elements() != Heap::empty_fixed_array()) return false;
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
// Hidden prototype
proto = JSObject::cast(proto->GetPrototype());
ASSERT(proto->elements() == Heap::empty_fixed_array());
array_proto = JSObject::cast(array_proto->GetPrototype());
ASSERT(array_proto->elements() == Heap::empty_fixed_array());
// Object.prototype
proto = JSObject::cast(proto->GetPrototype());
if (proto != global_context->initial_object_prototype()) return false;
if (proto->elements() != Heap::empty_fixed_array()) return false;
ASSERT(proto->GetPrototype()->IsNull());
array_proto = JSObject::cast(array_proto->GetPrototype());
if (array_proto != global_context->initial_object_prototype()) return false;
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
ASSERT(array_proto->GetPrototype()->IsNull());
return true;
}
@ -368,6 +365,18 @@ static bool IsJSArrayWithFastElements(Object* receiver,
}
static bool IsFastElementMovingAllowed(Object* receiver,
FixedArray** elements) {
if (!IsJSArrayWithFastElements(receiver, elements)) return false;
Context* global_context = Top::context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false;
return ArrayPrototypeHasNoElements(global_context, array_proto);
}
static Object* CallJsBuiltin(const char* name,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope;
@ -465,11 +474,7 @@ BUILTIN(ArrayPop) {
return top;
}
// Remember to check the prototype chain.
JSFunction* array_function =
Top::context()->global_context()->array_function();
JSObject* prototype = JSObject::cast(array_function->prototype());
top = prototype->GetElement(len - 1);
top = array->GetPrototype()->GetElement(len - 1);
return top;
}
@ -478,8 +483,7 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArrayShift", args);
}
JSArray* array = JSArray::cast(receiver);
@ -515,8 +519,7 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArrayUnshift", args);
}
JSArray* array = JSArray::cast(receiver);
@ -565,8 +568,7 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArraySlice", args);
}
JSArray* array = JSArray::cast(receiver);
@ -635,8 +637,7 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
if (!IsJSArrayWithFastElements(receiver, &elms)
|| !ArrayPrototypeHasNoElements()) {
if (!IsFastElementMovingAllowed(receiver, &elms)) {
return CallJsBuiltin("ArraySplice", args);
}
JSArray* array = JSArray::cast(receiver);
@ -788,7 +789,10 @@ BUILTIN(ArraySplice) {
BUILTIN(ArrayConcat) {
if (!ArrayPrototypeHasNoElements()) {
Context* global_context = Top::context()->global_context();
JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype());
if (!ArrayPrototypeHasNoElements(global_context, array_proto)) {
return CallJsBuiltin("ArrayConcat", args);
}
@ -798,7 +802,8 @@ BUILTIN(ArrayConcat) {
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) {
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
|| JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin("ArrayConcat", args);
}
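To make the effect of these guards concrete, here is a short JavaScript illustration of my own (not part of the commit) showing the two situations the new ArrayPrototypeHasNoElements / IsFastElementMovingAllowed checks detect, both of which force a fallback to CallJsBuiltin:

// Once Array.prototype carries an element, holes become observable through
// the prototype chain, so shift/slice/splice cannot simply move the backing
// store in C++ and must use the generic JS builtins.
Array.prototype[1] = 'from the prototype';
var a = [0, , 2];   // a[1] is a hole
a.shift();          // per spec, reading index 1 now falls through to the prototype

// An array whose prototype is no longer Array.prototype fails the new
// GetPrototype() != array_proto check and likewise takes the generic path.
var b = [1, 2, 3];
b.__proto__ = {};                     // non-standard mutation, for illustration only
Array.prototype.slice.call(b, 0, 2);  // b no longer inherits slice directly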

3
deps/v8/src/codegen.h

@ -28,7 +28,6 @@
#ifndef V8_CODEGEN_H_
#define V8_CODEGEN_H_
#include "ast.h"
#include "code-stubs.h"
#include "runtime.h"
#include "type-info.h"
@ -115,7 +114,7 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
F(RandomHeapNumber, 0, 1) \
F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \

16
deps/v8/src/compiler.cc

@ -120,7 +120,21 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
bool force_full_compiler = false;
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
// On ia32 the full compiler can compile all code, whereas on the other
// platforms the supported constructs are checked by the syntax checker. When
// --always-full-compiler is used on ia32 the syntax checker is still in
// effect, but there is a special flag --force-full-compiler to ignore the
// syntax checker completely and use the full compiler for all code. Also
// when debugging on ia32 the full compiler will be used for all code.
force_full_compiler =
Debugger::IsDebuggerActive() || FLAG_force_full_compiler;
#endif
if (force_full_compiler) {
return FullCodeGenerator::MakeCode(info);
} else if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
FullCodeGenSyntaxChecker checker;
checker.Check(function);
if (checker.has_supported_syntax()) {

2
deps/v8/src/cpu-profiler-inl.h

@ -54,7 +54,7 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddAlias(alias, start);
code_map->AddAlias(start, entry, code_start);
}

47
deps/v8/src/cpu-profiler.cc

@ -141,13 +141,15 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
Address start) {
Address start,
int security_token_id) {
CodeEventsContainer evt_rec;
CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
rec->type = CodeEventRecord::CODE_ALIAS;
rec->order = ++enqueue_order_;
rec->alias = alias;
rec->start = start;
rec->start = alias;
rec->entry = generator_->NewCodeEntry(security_token_id);
rec->code_start = start;
events_buffer_.Enqueue(evt_rec);
}
@ -257,26 +259,30 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
}
CpuProfile* CpuProfiler::StopProfiling(String* title) {
return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
return is_profiling() ?
singleton_->StopCollectingProfile(security_token, title) : NULL;
}
int CpuProfiler::GetProfilesCount() {
ASSERT(singleton_ != NULL);
return singleton_->profiles_->profiles()->length();
// The count of profiles doesn't depend on a security token.
return singleton_->profiles_->Profiles(CodeEntry::kNoSecurityToken)->length();
}
CpuProfile* CpuProfiler::GetProfile(int index) {
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
ASSERT(singleton_ != NULL);
return singleton_->profiles_->profiles()->at(index);
const int token = singleton_->token_enumerator_->GetTokenId(security_token);
return singleton_->profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
ASSERT(singleton_ != NULL);
return singleton_->profiles_->GetProfile(uid);
const int token = singleton_->token_enumerator_->GetTokenId(security_token);
return singleton_->profiles_->GetProfile(token, uid);
}
@ -348,8 +354,15 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
int security_token_id = CodeEntry::kNoSecurityToken;
if (function->unchecked_context()->IsContext()) {
security_token_id = singleton_->token_enumerator_->GetTokenId(
function->context()->global_context()->security_token());
}
singleton_->processor_->FunctionCreateEvent(
function->address(), function->code()->address());
function->address(),
function->code()->address(),
security_token_id);
}
@ -388,12 +401,14 @@ void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
CpuProfiler::CpuProfiler()
: profiles_(new CpuProfilesCollection()),
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(NULL),
processor_(NULL) {
}
CpuProfiler::~CpuProfiler() {
delete token_enumerator_;
delete profiles_;
}
@ -438,7 +453,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken,
title,
actual_sampling_rate);
if (result != NULL) {
result->Print();
}
@ -446,10 +463,12 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
}
CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
String* title) {
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile();
return profiles_->StopProfiling(title, actual_sampling_rate);
int token = token_enumerator_->GetTokenId(security_token);
return profiles_->StopProfiling(token, title, actual_sampling_rate);
}

16
deps/v8/src/cpu-profiler.h

@ -41,7 +41,7 @@ class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@ -94,8 +94,9 @@ class CodeDeleteEventRecord : public CodeEventRecord {
class CodeAliasEventRecord : public CodeEventRecord {
public:
Address alias;
Address start;
CodeEntry* entry;
Address code_start;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@ -151,7 +152,7 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
void FunctionCreateEvent(Address alias, Address start);
void FunctionCreateEvent(Address alias, Address start, int security_token_id);
void FunctionMoveEvent(Address from, Address to);
void FunctionDeleteEvent(Address from);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
@ -212,10 +213,10 @@ class CpuProfiler {
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
static CpuProfile* StopProfiling(String* title);
static CpuProfile* StopProfiling(Object* security_token, String* title);
static int GetProfilesCount();
static CpuProfile* GetProfile(int index);
static CpuProfile* FindProfile(unsigned uid);
static CpuProfile* GetProfile(Object* security_token, int index);
static CpuProfile* FindProfile(Object* security_token, unsigned uid);
// Invoked from stack sampler (thread or signal handler.)
static TickSample* TickSampleEvent();
@ -252,11 +253,12 @@ class CpuProfiler {
void StartCollectingProfile(String* title);
void StartProcessorIfNotStarted();
CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(String* title);
CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile();
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
TokenEnumerator* token_enumerator_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;

57
deps/v8/src/d8.js

@ -341,6 +341,11 @@ function DebugRequest(cmd_line) {
this.request_ = this.breakCommandToJSONRequest_(args);
break;
case 'breakpoints':
case 'bb':
this.request_ = this.breakpointsCommandToJSONRequest_(args);
break;
case 'clear':
this.request_ = this.clearCommandToJSONRequest_(args);
break;
@ -770,6 +775,15 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
};
DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) {
if (args && args.length > 0) {
throw new Error('Unexpected arguments.');
}
var request = this.createRequest('listbreakpoints');
return request.toJSONProtocol();
};
// Create a JSON request for the clear command.
DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
// Build an evaluate request from the text command.
@ -948,6 +962,39 @@ function DebugResponseDetails(response) {
details.text = result;
break;
case 'listbreakpoints':
result = 'breakpoints: (' + body.breakpoints.length + ')';
for (var i = 0; i < body.breakpoints.length; i++) {
var breakpoint = body.breakpoints[i];
result += '\n id=' + breakpoint.number;
result += ' type=' + breakpoint.type;
if (breakpoint.script_id) {
result += ' script_id=' + breakpoint.script_id;
}
if (breakpoint.script_name) {
result += ' script_name=' + breakpoint.script_name;
}
result += ' line=' + breakpoint.line;
if (breakpoint.column != null) {
result += ' column=' + breakpoint.column;
}
if (breakpoint.groupId) {
result += ' groupId=' + breakpoint.groupId;
}
if (breakpoint.ignoreCount) {
result += ' ignoreCount=' + breakpoint.ignoreCount;
}
if (breakpoint.active === false) {
result += ' inactive';
}
if (breakpoint.condition) {
result += ' condition=' + breakpoint.condition;
}
result += ' hit_count=' + breakpoint.hit_count;
}
details.text = result;
break;
case 'backtrace':
if (body.totalFrames == 0) {
result = '(empty stack)';
@ -1136,8 +1183,8 @@ function DebugResponseDetails(response) {
default:
details.text =
'Response for unknown command \'' + response.command + '\'' +
' (' + json_response + ')';
'Response for unknown command \'' + response.command() + '\'' +
' (' + response.raw_json() + ')';
}
} catch (e) {
details.text = 'Error: "' + e + '" formatting response';
@ -1153,6 +1200,7 @@ function DebugResponseDetails(response) {
* @constructor
*/
function ProtocolPackage(json) {
this.raw_json_ = json;
this.packet_ = JSON.parse(json);
this.refs_ = [];
if (this.packet_.refs) {
@ -1243,6 +1291,11 @@ ProtocolPackage.prototype.lookup = function(handle) {
}
ProtocolPackage.prototype.raw_json = function() {
return this.raw_json_;
}
function ProtocolValue(value, packet) {
this.value_ = value;
this.packet_ = packet;
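For orientation, the new breakpoints command (alias bb) accepts no arguments and, through createRequest('listbreakpoints'), sends a request of roughly the following shape; the sample rendering underneath follows the formatting code added above (both are my sketch, not output captured from this build):

// Typed at the d8 debugger prompt:
//   breakpoints
// Approximate request emitted by breakpointsCommandToJSONRequest_:
var request = {
  "seq": 1,                      // sequence number assigned by the shell
  "type": "request",
  "command": "listbreakpoints"   // passing any arguments throws an error
};
// The 'listbreakpoints' response handler above then prints lines such as:
//   breakpoints: (1)
//    id=1 type=scriptName script_name=test.js line=4 hit_count=0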

11
deps/v8/src/date.js

@ -238,7 +238,15 @@ function LocalTime(time) {
return time + DaylightSavingsOffset(time) + local_time_offset;
}
var ltcache = {
key: null,
val: null
};
function LocalTimeNoCheck(time) {
var ltc = ltcache;
if (%_ObjectEquals(time, ltc.key)) return ltc.val;
if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
return $NaN;
}
@ -252,7 +260,8 @@ function LocalTimeNoCheck(time) {
} else {
var dst_offset = DaylightSavingsOffset(time);
}
return time + local_time_offset + dst_offset;
ltc.key = time;
return (ltc.val = time + local_time_offset + dst_offset);
}
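The date.js change above adds a single-entry cache so that converting the same time value twice in a row skips the (relatively expensive) DST and offset computation; the real code compares keys with the %_ObjectEquals intrinsic, while this standalone sketch of the same pattern (my own, with hypothetical names) uses ===:

// Single-entry memoization: remember the last argument and its result, and
// short-circuit when the same argument is seen again.
function memoizeLast(fn) {
  var cache = { key: null, val: null };
  return function(x) {
    if (x === cache.key) return cache.val;  // hit: same input as the last call
    cache.key = x;
    return (cache.val = fn(x));             // miss: compute and remember
  };
}

// Usage: the second conversion of the same timestamp returns the cached value.
var localTime = memoizeLast(function(t) { return t + 3600000; });  // stand-in
localTime(1273500000000);
localTime(1273500000000);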

31
deps/v8/src/debug-debugger.js

@ -1266,6 +1266,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.clearBreakPointRequest_(request, response);
} else if (request.command == 'clearbreakpointgroup') {
this.clearBreakPointGroupRequest_(request, response);
} else if (request.command == 'listbreakpoints') {
this.listBreakpointsRequest_(request, response);
} else if (request.command == 'backtrace') {
this.backtraceRequest_(request, response);
} else if (request.command == 'frame') {
@ -1581,6 +1583,35 @@ DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, resp
response.body = { breakpoint: break_point }
}
DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
var array = [];
for (var i = 0; i < script_break_points.length; i++) {
var break_point = script_break_points[i];
var description = {
number: break_point.number(),
line: break_point.line(),
column: break_point.column(),
groupId: break_point.groupId(),
hit_count: break_point.hit_count(),
active: break_point.active(),
condition: break_point.condition(),
ignoreCount: break_point.ignoreCount()
}
if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
description.type = 'scriptId';
description.script_id = break_point.script_id();
} else {
description.type = 'scriptName';
description.script_name = break_point.script_name();
}
array.push(description);
}
response.body = { breakpoints: array }
}
DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
// Get the number of frames.
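Putting the fields above together, the body of a listbreakpoints response for a single script break point looks roughly like this (values invented for illustration; only the field names come from the handler above):

// Approximate response.body built by listBreakpointsRequest_:
var body = {
  breakpoints: [{
    number: 1,               // break_point.number()
    line: 4,
    column: 2,
    groupId: undefined,      // present only when the breakpoint has a group
    hit_count: 0,
    active: true,
    condition: null,
    ignoreCount: 0,
    type: 'scriptName',      // or 'scriptId', paired with script_id
    script_name: 'test.js'
  }]
};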

72
deps/v8/src/debug.cc

@ -72,6 +72,17 @@ static Handle<Code> ComputeCallDebugPrepareStepIn(int argc) {
}
static v8::Handle<v8::Context> GetDebugEventContext() {
Handle<Context> context = Debug::debugger_entry()->GetContext();
// Top::context() may have been NULL when "script collected" event occurred.
if (*context == NULL) {
return v8::Local<v8::Context>();
}
Handle<Context> global_context(context->global_context());
return v8::Utils::ToLocal(global_context);
}
BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
BreakLocatorType type) {
debug_info_ = debug_info;
@ -2112,12 +2123,14 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
if (event_listener_->IsProxy()) {
// C debug event listener.
Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
v8::Debug::EventCallback callback =
FUNCTION_CAST<v8::Debug::EventCallback>(callback_obj->proxy());
callback(event,
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
v8::Utils::ToLocal(event_data),
v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_)));
v8::Debug::EventCallback2 callback =
FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
EventDetailsImpl event_details(
event,
Handle<JSObject>::cast(exec_state),
event_data,
event_listener_data_);
callback(event_details);
} else {
// JavaScript debug event listener.
ASSERT(event_listener_->IsJSFunction());
@ -2643,14 +2656,10 @@ v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
Handle<Context> context = Debug::debugger_entry()->GetContext();
// Top::context() may have been NULL when "script collected" event occurred.
if (*context == NULL) {
ASSERT(event_ == v8::ScriptCollected);
return v8::Local<v8::Context>();
}
Handle<Context> global_context(context->global_context());
return v8::Utils::ToLocal(global_context);
v8::Handle<v8::Context> context = GetDebugEventContext();
// Top::context() may be NULL when "script collected" event occurs.
ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
return GetDebugEventContext();
}
@ -2659,6 +2668,41 @@ v8::Debug::ClientData* MessageImpl::GetClientData() const {
}
EventDetailsImpl::EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<Object> callback_data)
: event_(event),
exec_state_(exec_state),
event_data_(event_data),
callback_data_(callback_data) {}
DebugEvent EventDetailsImpl::GetEvent() const {
return event_;
}
v8::Handle<v8::Object> EventDetailsImpl::GetExecutionState() const {
return v8::Utils::ToLocal(exec_state_);
}
v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
return GetDebugEventContext();
}
v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
return v8::Utils::ToLocal(callback_data_);
}
CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
}

24
deps/v8/src/debug.h

@ -524,6 +524,27 @@ class MessageImpl: public v8::Debug::Message {
};
// Details of the debug event delivered to the debug event listener.
class EventDetailsImpl : public v8::Debug::EventDetails {
public:
EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<Object> callback_data);
virtual DebugEvent GetEvent() const;
virtual v8::Handle<v8::Object> GetExecutionState() const;
virtual v8::Handle<v8::Object> GetEventData() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Handle<v8::Value> GetCallbackData() const;
private:
DebugEvent event_; // Debug event causing the break.
Handle<JSObject> exec_state_; // Current execution state.
Handle<JSObject> event_data_; // Data associated with the event.
Handle<Object> callback_data_; // User data passed with the callback when
// it was registered.
};
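
The EventDetailsImpl declared above bundles what the old four-argument C callback received into one object, matching the EventCallback2 path used in debug.cc. A hedged sketch of a listener written against this interface; the header name and the exact EventCallback2 signature are assumptions based on the FUNCTION_CAST in debug.cc, and the registration call is omitted:

```cpp
#include <cstdio>
#include "v8-debug.h"  // assumed location of Debug::EventDetails in this release

// Sketch of a debug event listener in the EventCallback2 style:
// everything arrives through the details object instead of four arguments.
static void OnDebugEvent(const v8::Debug::EventDetails& details) {
  if (details.GetEvent() == v8::Break) {
    v8::Handle<v8::Object> exec_state = details.GetExecutionState();
    v8::Handle<v8::Object> event_data = details.GetEventData();
    // Inspect exec_state / event_data here, e.g. to request a backtrace.
    (void)exec_state;
    (void)event_data;
    std::printf("break event\n");
  }
}
```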
// Message sent by user to v8 debugger or debugger output message.
// In addition to command text it may contain a pointer to some user data
// which are expected to be passed along with the command response to message
@ -693,8 +714,9 @@ class Debugger {
static void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
static bool is_loading_debugger() { return Debugger::is_loading_debugger_; }
private:
static bool IsDebuggerActive();
private:
static void ListenersChanged();
static Mutex* debugger_access_; // Mutex guarding debugger variables.

17
deps/v8/src/flag-definitions.h

@ -100,10 +100,10 @@ private:
DEFINE_bool(debug_code, false,
"generate extra code (comments, assertions) for debugging")
DEFINE_bool(emit_branch_hints, false, "emit branch hints")
DEFINE_bool(push_pop_elimination, true,
"eliminate redundant push/pops in assembly code")
DEFINE_bool(print_push_pop_elimination, false,
"print elimination of redundant push/pops in assembly code")
DEFINE_bool(peephole_optimization, true,
"perform peephole optimizations in assembly code")
DEFINE_bool(print_peephole_optimization, false,
"print peephole optimizations in assembly code")
DEFINE_bool(enable_sse2, true,
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
@ -149,6 +149,10 @@ DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
DEFINE_bool(always_full_compiler, false,
"try to use the dedicated run-once backend for all code")
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
DEFINE_bool(force_full_compiler, false,
"force use of the dedicated run-once backend for all code")
#endif
DEFINE_bool(always_fast_compiler, false,
"try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,
@ -182,6 +186,11 @@ DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
"print one trace line following each garbage collection")
DEFINE_bool(trace_gc_nvp, false,
"print one detailed trace line in name=value format "
"after each garbage collection")
DEFINE_bool(print_cumulative_gc_stat, false,
"print cumulative GC statistics in name=value format on exit")
DEFINE_bool(trace_gc_verbose, false,
"print more details following each garbage collection")
DEFINE_bool(collect_maps, true,

35
deps/v8/src/full-codegen.cc

@ -760,11 +760,6 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
}
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
@ -810,6 +805,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Visit(stmt->body());
__ bind(loop_statement.continue_target());
// Check stack before looping.
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
@ -872,11 +868,6 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
}
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Comment cmnt(masm_, "[ TryCatchStatement");
SetStatementPosition(stmt);
@ -995,12 +986,6 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
}
void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
UNREACHABLE();
}
void FullCodeGenerator::VisitConditional(Conditional* expr) {
Comment cmnt(masm_, "[ Conditional");
Label true_case, false_case, done;
@ -1034,6 +1019,24 @@ void FullCodeGenerator::VisitLiteral(Literal* expr) {
}
void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
Handle<SharedFunctionInfo> function_info =
Compiler::BuildFunctionInfo(expr, script(), this);
if (HasStackOverflow()) return;
EmitNewClosure(function_info);
}
void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
EmitNewClosure(expr->shared_function_info());
}
void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.

76
deps/v8/src/full-codegen.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -31,6 +31,7 @@
#include "v8.h"
#include "ast.h"
#include "compiler.h"
namespace v8 {
namespace internal {
@ -229,8 +230,6 @@ class FullCodeGenerator: public AstVisitor {
return stack_depth + kForInStackElementCount;
}
private:
// TODO(lrn): Check that this value is correct when implementing
// for-in.
static const int kForInStackElementCount = 5;
DISALLOW_COPY_AND_ASSIGN(ForIn);
};
@ -258,12 +257,22 @@ class FullCodeGenerator: public AstVisitor {
// context.
void DropAndApply(int count, Expression::Context context, Register reg);
// Set up branch labels for a test expression.
void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false);
// Emit code to convert pure control flow to a pair of labels into the
// result expected according to an expression context.
void Apply(Expression::Context context,
Label* materialize_true,
Label* materialize_false);
// Emit code to convert constant control flow (true or false) into
// the result expected according to an expression context.
void Apply(Expression::Context context, bool flag);
// Helper function to convert a pure value into a test context. The value
// is expected on the stack or the accumulator, depending on the platform.
// See the platform-specific implementation for details.
@ -348,6 +357,12 @@ class FullCodeGenerator: public AstVisitor {
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
// Platform-specific code for a variable, constant, or function
// declaration. Functions have an initial value.
void EmitDeclaration(Variable* variable,
Variable::Mode mode,
FunctionLiteral* function);
// Platform-specific return sequence
void EmitReturnSequence(int position);
@ -355,9 +370,48 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallWithStub(Call* expr);
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
// Platform-specific code for inline runtime calls.
void EmitInlineRuntimeCall(CallRuntime* expr);
void EmitIsSmi(ZoneList<Expression*>* arguments);
void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
void EmitIsObject(ZoneList<Expression*>* arguments);
void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
void EmitIsFunction(ZoneList<Expression*>* arguments);
void EmitIsArray(ZoneList<Expression*>* arguments);
void EmitIsRegExp(ZoneList<Expression*>* arguments);
void EmitIsConstructCall(ZoneList<Expression*>* arguments);
void EmitObjectEquals(ZoneList<Expression*>* arguments);
void EmitArguments(ZoneList<Expression*>* arguments);
void EmitArgumentsLength(ZoneList<Expression*>* arguments);
void EmitClassOf(ZoneList<Expression*>* arguments);
void EmitValueOf(ZoneList<Expression*>* arguments);
void EmitSetValueOf(ZoneList<Expression*>* arguments);
void EmitNumberToString(ZoneList<Expression*>* arguments);
void EmitCharFromCode(ZoneList<Expression*>* arguments);
void EmitFastCharCodeAt(ZoneList<Expression*>* arguments);
void EmitStringCompare(ZoneList<Expression*>* arguments);
void EmitStringAdd(ZoneList<Expression*>* arguments);
void EmitLog(ZoneList<Expression*>* arguments);
void EmitRandomHeapNumber(ZoneList<Expression*>* arguments);
void EmitSubString(ZoneList<Expression*>* arguments);
void EmitRegExpExec(ZoneList<Expression*>* arguments);
void EmitMathPow(ZoneList<Expression*>* arguments);
void EmitMathSin(ZoneList<Expression*>* arguments);
void EmitMathCos(ZoneList<Expression*>* arguments);
void EmitMathSqrt(ZoneList<Expression*>* arguments);
void EmitCallFunction(ZoneList<Expression*>* arguments);
void EmitRegExpConstructResult(ZoneList<Expression*>* arguments);
void EmitSwapElements(ZoneList<Expression*>* arguments);
void EmitGetFromCache(ZoneList<Expression*>* arguments);
// Platform-specific code for loading variables.
void EmitVariableLoad(Variable* expr, Expression::Context context);
// Platform-specific support for allocating a new closure based on
// the given function info.
void EmitNewClosure(Handle<SharedFunctionInfo> info);
// Platform-specific support for compiling assignments.
// Load a value from a named property.
@ -372,9 +426,15 @@ class FullCodeGenerator: public AstVisitor {
// of the stack and the right one in the accumulator.
void EmitBinaryOp(Token::Value op, Expression::Context context);
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator.
void EmitAssignment(Expression* expr);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
void EmitVariableAssignment(Variable* var, Expression::Context context);
void EmitVariableAssignment(Variable* var,
Token::Value op,
Expression::Context context);
// Complete a named property assignment. The receiver is expected on top
// of the stack and the right-hand-side value in the accumulator.
@ -385,6 +445,14 @@ class FullCodeGenerator: public AstVisitor {
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
// Helper for compare operations. Expects the null-value in a register.
void EmitNullCompare(bool strict,
Register obj,
Register null_const,
Label* if_true,
Label* if_false,
Register scratch);
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);

18
deps/v8/src/globals.h

@ -59,6 +59,24 @@ namespace internal {
#error Host architecture was not detected as supported by v8
#endif
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
!defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
#elif defined(_MIPS_ARCH_MIPS32R2)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
#endif
#endif
// Check for supported combinations of host and target architectures.
#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
#error Target architecture ia32 is only supported on ia32 host
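
The new detection block lets the build system force a target architecture externally (useful when the target is simulated, for instance ARM code generation on an ia32 host) and otherwise infers it from the compiler's predefines. A hypothetical probe, not part of the tree, that shows which macro ends up defined:

```cpp
#include <cstdio>

int main() {
#if defined(V8_TARGET_ARCH_X64)
  std::puts("target: x64");
#elif defined(V8_TARGET_ARCH_IA32)
  std::puts("target: ia32");
#elif defined(V8_TARGET_ARCH_ARM)
  std::puts("target: arm");
#elif defined(V8_TARGET_ARCH_MIPS)
  std::puts("target: mips");
#else
  std::puts("target: not set");
#endif
  return 0;
}
```

Compiling the probe with -DV8_TARGET_ARCH_ARM exercises the externally-set case; inside the real build, when no such define is passed, the block above falls back to the compiler's own predefines such as __i386__ or __x86_64__.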

142
deps/v8/src/heap.cc

@ -115,8 +115,11 @@ int Heap::external_allocation_limit_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;
GCTracer* Heap::tracer_ = NULL;
int Heap::unflattened_strings_length_ = 0;
int Heap::always_allocate_scope_depth_ = 0;
@ -130,6 +133,11 @@ int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif // DEBUG
int GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
int GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;
int Heap::Capacity() {
if (!HasBeenSetup()) return 0;
@ -570,7 +578,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
VerifySymbolTable();
if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::ExternalScope scope(tracer);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_prologue_callback_();
}
@ -596,14 +604,16 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
} else {
tracer_ = tracer;
Scavenge();
tracer_ = NULL;
}
Counters::objs_since_last_young.Set(0);
if (collector == MARK_COMPACTOR) {
DisableAssertNoAllocation allow_allocation;
GCTracer::ExternalScope scope(tracer);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
GlobalHandles::PostGarbageCollectionProcessing();
}
@ -627,7 +637,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
ASSERT(!allocation_allowed_);
GCTracer::ExternalScope scope(tracer);
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
global_gc_epilogue_callback_();
}
VerifySymbolTable();
@ -636,7 +646,11 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
mc_count_++;
if (MarkCompactCollector::IsCompacting()) {
mc_count_++;
} else {
ms_count_++;
}
tracer->set_full_gc_count(mc_count_);
LOG(ResourceEvent("markcompact", "begin"));
@ -1179,6 +1193,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
node->set_size(object_size);
*p = target;
tracer()->increment_promoted_objects_size(object_size);
return;
}
} else {
@ -1214,6 +1229,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
(*p)->Iterate(&v);
#endif
}
tracer()->increment_promoted_objects_size(object_size);
return;
}
}
@ -2064,7 +2080,7 @@ Object* Heap::AllocateSubString(String* buffer,
}
// Make an attempt to flatten the buffer to reduce access time.
buffer->TryFlatten();
buffer = buffer->TryFlattenGetString();
Object* result = buffer->IsAsciiRepresentation()
? AllocateRawAsciiString(length, pretenure)
@ -3760,6 +3776,17 @@ void Heap::SetStackLimits() {
void Heap::TearDown() {
if (FLAG_print_cumulative_gc_stat) {
PrintF("\n\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
PrintF("mark_compact_count=%d ", mc_count_);
PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
PrintF("\n\n");
}
GlobalHandles::TearDown();
ExternalStringTable::TearDown();
@ -4235,33 +4262,114 @@ void Heap::TracePathToGlobal() {
#endif
static int CountTotalHolesSize() {
int holes_size = 0;
OldSpaces spaces;
for (OldSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
holes_size += space->Waste() + space->AvailableFree();
}
return holes_size;
}
GCTracer::GCTracer()
: start_time_(0.0),
start_size_(0.0),
external_time_(0.0),
start_size_(0),
gc_count_(0),
full_gc_count_(0),
is_compacting_(false),
marked_count_(0) {
marked_count_(0),
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0) {
// These two fields reflect the state of the previous full collection.
// Set them before they are changed by the collector.
previous_has_compacted_ = MarkCompactCollector::HasCompacted();
previous_marked_count_ = MarkCompactCollector::previous_marked_count();
if (!FLAG_trace_gc) return;
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = SizeOfHeapObjects();
start_size_ = Heap::SizeOfObjects();
for (int i = 0; i < Scope::kNumberOfScopes; i++) {
scopes_[i] = 0;
}
in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
if (last_gc_end_timestamp_ > 0) {
spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
}
}
GCTracer::~GCTracer() {
if (!FLAG_trace_gc) return;
// Printf ONE line iff flag is set.
int time = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
int external_time = static_cast<int>(external_time_);
PrintF("%s %.1f -> %.1f MB, ",
CollectorString(), start_size_, SizeOfHeapObjects());
if (external_time > 0) PrintF("%d / ", external_time);
PrintF("%d ms.\n", time);
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
bool first_gc = (last_gc_end_timestamp_ == 0);
alive_after_last_gc_ = Heap::SizeOfObjects();
last_gc_end_timestamp_ = OS::TimeCurrentMillis();
int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
max_gc_pause_ = Max(max_gc_pause_, time);
max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
if (!first_gc) {
min_in_mutator_ = Min(min_in_mutator_,
static_cast<int>(spent_in_mutator_));
}
}
if (!FLAG_trace_gc_nvp) {
int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
PrintF("%s %.1f -> %.1f MB, ",
CollectorString(),
static_cast<double>(start_size_) / MB,
SizeOfHeapObjects());
if (external_time > 0) PrintF("%d / ", external_time);
PrintF("%d ms.\n", time);
} else {
PrintF("pause=%d ", time);
PrintF("mutator=%d ",
static_cast<int>(spent_in_mutator_));
PrintF("gc=");
switch (collector_) {
case SCAVENGER:
PrintF("s");
break;
case MARK_COMPACTOR:
PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
break;
default:
UNREACHABLE();
}
PrintF(" ");
PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
PrintF("total_size_before=%d ", start_size_);
PrintF("total_size_after=%d ", Heap::SizeOfObjects());
PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
PrintF("holes_size_after=%d ", CountTotalHolesSize());
PrintF("allocated=%d ", allocated_since_last_gc_);
PrintF("promoted=%d ", promoted_objects_size_);
PrintF("\n");
}
#if defined(ENABLE_LOGGING_AND_PROFILING)
Heap::PrintShortHeapStatistics();

78
deps/v8/src/heap.h

@ -981,6 +981,8 @@ class Heap : public AllStatic {
static void ClearJSFunctionResultCaches();
static GCTracer* tracer() { return tracer_; }
private:
static int reserved_semispace_size_;
static int max_semispace_size_;
@ -1020,6 +1022,7 @@ class Heap : public AllStatic {
static int PromotedExternalMemorySize();
static int mc_count_; // how many mark-compact collections happened
static int ms_count_; // how many mark-sweep collections happened
static int gc_count_; // how many gc happened
// Total length of the strings we failed to flatten since the last GC.
@ -1223,6 +1226,8 @@ class Heap : public AllStatic {
SharedFunctionInfo* shared,
Object* prototype);
static GCTracer* tracer_;
// Initializes the number to string cache based on the max semispace size.
static Object* InitializeNumberStringCache();
@ -1629,19 +1634,30 @@ class DisableAssertNoAllocation {
class GCTracer BASE_EMBEDDED {
public:
// Time spent while in the external scope counts towards the
// external time in the tracer and will be reported separately.
class ExternalScope BASE_EMBEDDED {
class Scope BASE_EMBEDDED {
public:
explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) {
enum ScopeId {
EXTERNAL,
MC_MARK,
MC_SWEEP,
MC_COMPACT,
kNumberOfScopes
};
Scope(GCTracer* tracer, ScopeId scope)
: tracer_(tracer),
scope_(scope) {
start_time_ = OS::TimeCurrentMillis();
}
~ExternalScope() {
tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_;
~Scope() {
ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
}
private:
GCTracer* tracer_;
ScopeId scope_;
double start_time_;
};
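
The Scope class above generalizes the old external-only timer: each instrumented phase constructs a Scope on the stack, and its destructor adds the elapsed wall-clock time to the per-phase bucket that the --trace-gc-nvp output later prints (external, mark, sweep, compact). A reduced sketch of the same RAII accounting, with the tracer and clock boiled down to standard C++; the names here are illustrative, not the V8 types:

```cpp
#include <chrono>

// Reduced model of GCTracer with only the per-scope time buckets.
struct Tracer {
  enum ScopeId { EXTERNAL, MC_MARK, MC_SWEEP, MC_COMPACT, kNumberOfScopes };
  double scopes_ms[kNumberOfScopes] = {0};
};

// RAII timer: accumulates elapsed time into one bucket on destruction,
// mirroring GCTracer::Scope's constructor/destructor pair.
class ScopeTimer {
 public:
  ScopeTimer(Tracer* tracer, Tracer::ScopeId id)
      : tracer_(tracer), id_(id), start_(std::chrono::steady_clock::now()) {}
  ~ScopeTimer() {
    std::chrono::duration<double, std::milli> elapsed =
        std::chrono::steady_clock::now() - start_;
    tracer_->scopes_ms[id_] += elapsed.count();
  }
 private:
  Tracer* tracer_;
  Tracer::ScopeId id_;
  std::chrono::steady_clock::time_point start_;
};

// Usage mirrors the call sites in heap.cc:
//   { ScopeTimer scope(&tracer, Tracer::EXTERNAL); /* run GC callbacks */ }
```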
@ -1667,6 +1683,19 @@ class GCTracer BASE_EMBEDDED {
int marked_count() { return marked_count_; }
void increment_promoted_objects_size(int object_size) {
promoted_objects_size_ += object_size;
}
// Returns maximum GC pause.
static int get_max_gc_pause() { return max_gc_pause_; }
// Returns maximum size of objects alive after GC.
static int get_max_alive_after_gc() { return max_alive_after_gc_; }
// Returns minimal interval between two subsequent collections.
static int get_min_in_mutator() { return min_in_mutator_; }
private:
// Returns a string matching the collector.
const char* CollectorString();
@ -1677,12 +1706,9 @@ class GCTracer BASE_EMBEDDED {
}
double start_time_; // Timestamp set in the constructor.
double start_size_; // Size of objects in heap set in constructor.
int start_size_; // Size of objects in heap set in constructor.
GarbageCollector collector_; // Type of collector.
// Keep track of the amount of time spent in external callbacks.
double external_time_;
// A count (including this one, eg, the first collection is 1) of the
// number of garbage collections.
int gc_count_;
@ -1706,6 +1732,38 @@ class GCTracer BASE_EMBEDDED {
// The count from the end of the previous full GC. Will be zero if there
// was no previous full GC.
int previous_marked_count_;
// Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes];
// Total amount of space either wasted or contained in one of free lists
// before the current GC.
int in_free_list_or_wasted_before_gc_;
// Difference between space used in the heap at the beginning of the current
// collection and the end of the previous collection.
int allocated_since_last_gc_;
// Amount of time spent in mutator that is time elapsed between end of the
// previous collection and the beginning of the current one.
double spent_in_mutator_;
// Size of objects promoted during the current collection.
int promoted_objects_size_;
// Maximum GC pause.
static int max_gc_pause_;
// Maximum size of objects alive after GC.
static int max_alive_after_gc_;
// Minimal interval between two subsequent collections.
static int min_in_mutator_;
// Size of objects alive after last GC.
static int alive_after_last_gc_;
static double last_gc_end_timestamp_;
};

5
deps/v8/src/ia32/assembler-ia32-inl.h

@ -159,11 +159,6 @@ Immediate::Immediate(const ExternalReference& ext) {
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Immediate::Immediate(const char* s) {
x_ = reinterpret_cast<int32_t>(s);
rmode_ = RelocInfo::EMBEDDED_STRING;
}
Immediate::Immediate(Label* internal_offset) {
x_ = reinterpret_cast<int32_t>(internal_offset);

40
deps/v8/src/ia32/assembler-ia32.cc

@ -36,6 +36,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "disassembler.h"
#include "macro-assembler.h"
#include "serialize.h"
@ -160,6 +162,15 @@ const int RelocInfo::kApplyMask =
1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
// specially coded on IA32 means that it is a relative address, as used by
// branch instructions. These are also the ones that need changing when a
// code object moves.
return (1 << rmode_) & kApplyMask;
}
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// Patch the code at the current address with the supplied instructions.
for (int i = 0; i < instruction_count; i++) {
@ -433,7 +444,7 @@ void Assembler::push(const Operand& src) {
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
// (last_pc_ != NULL) is rolled into the above check.
// If a last_pc_ is set, we need to make sure that there has not been any
// relocation information generated between the last instruction and this
@ -443,7 +454,7 @@ void Assembler::pop(Register dst) {
int push_reg_code = instr & 0x7;
if (push_reg_code == dst.code()) {
pc_ = last_pc_;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
}
} else {
@ -452,7 +463,7 @@ void Assembler::pop(Register dst) {
Register src = { push_reg_code };
EnsureSpace ensure_space(this);
emit_operand(dst, Operand(src));
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
}
}
@ -466,7 +477,7 @@ void Assembler::pop(Register dst) {
last_pc_[0] = 0x8b;
last_pc_[1] = op1;
last_pc_ = NULL;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
}
return;
@ -483,7 +494,7 @@ void Assembler::pop(Register dst) {
last_pc_[1] = 0xc4;
last_pc_[2] = 0x04;
last_pc_ = NULL;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
}
return;
@ -498,7 +509,7 @@ void Assembler::pop(Register dst) {
// change to
// 31c0 xor eax,eax
last_pc_ = NULL;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
}
return;
@ -521,7 +532,7 @@ void Assembler::pop(Register dst) {
// b8XX000000 mov eax,0x000000XX
}
last_pc_ = NULL;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
}
return;
@ -533,7 +544,7 @@ void Assembler::pop(Register dst) {
last_pc_ = NULL;
// change to
// b8XXXXXXXX mov eax,0xXXXXXXXX
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
}
return;
@ -776,6 +787,13 @@ void Assembler::rep_stos() {
}
void Assembler::stos() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xAB);
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -813,7 +831,7 @@ void Assembler::add(Register dst, const Operand& src) {
void Assembler::add(const Operand& dst, const Immediate& x) {
ASSERT(reloc_info_writer.last_pc() != NULL);
if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
byte instr = last_pc_[0];
if ((instr & 0xf8) == 0x50) {
// Last instruction was a push. Check whether this is a pop without a
@ -822,7 +840,7 @@ void Assembler::add(const Operand& dst, const Immediate& x) {
(x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
pc_ = last_pc_;
last_pc_ = NULL;
if (FLAG_print_push_pop_elimination) {
if (FLAG_print_peephole_optimization) {
PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
}
return;
@ -2528,3 +2546,5 @@ void LogGeneratedCodeCoverage(const char* file_line) {
#endif
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

2
deps/v8/src/ia32/assembler-ia32.h

@ -194,7 +194,6 @@ inline Hint NegateHint(Hint hint) {
class Immediate BASE_EMBEDDED {
public:
inline explicit Immediate(int x);
inline explicit Immediate(const char* s);
inline explicit Immediate(const ExternalReference& ext);
inline explicit Immediate(Handle<Object> handle);
inline explicit Immediate(Smi* value);
@ -551,6 +550,7 @@ class Assembler : public Malloced {
// Repetitive string instructions.
void rep_movs();
void rep_stos();
void stos();
// Exchange two registers
void xchg(Register dst, Register src);

29
deps/v8/src/ia32/builtins-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
namespace v8 {
@ -806,6 +808,7 @@ static void AllocateJSArray(MacroAssembler* masm,
Label* gc_required) {
ASSERT(scratch.is(edi)); // rep stos destination
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
// Load the initial map from the array function.
__ mov(elements_array,
@ -863,15 +866,22 @@ static void AllocateJSArray(MacroAssembler* masm,
if (fill_with_hole) {
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ push(eax);
__ mov(eax, Factory::the_hole_value());
__ cld();
// Do not use rep stos when filling less than kRepStosThreshold
// words.
const int kRepStosThreshold = 16;
Label loop, entry, done;
__ cmp(ecx, kRepStosThreshold);
__ j(below, &loop); // Note: ecx > 0.
__ rep_stos();
// Restore saved registers.
__ pop(eax);
__ jmp(&done);
__ bind(&loop);
__ stos();
__ bind(&entry);
__ cmp(edi, Operand(elements_array_end));
__ j(below, &loop);
__ bind(&done);
}
}
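
The hole-filling code above now chooses between rep stos and a plain stos loop: rep stos has fixed startup overhead, so for fills shorter than kRepStosThreshold (16) words a simple loop is cheaper. A C-level sketch of that decision, as an analogy to the generated code rather than the macro-assembler API; function and variable names are illustrative:

```cpp
#include <algorithm>
#include <cstdint>

// Threshold taken from the patch: below 16 words, a plain loop wins.
const int kRepStosThreshold = 16;

void FillWithHole(intptr_t* dst, intptr_t hole, int count) {
  if (count < kRepStosThreshold) {
    for (int i = 0; i < count; i++) dst[i] = hole;  // corresponds to the stos loop
  } else {
    std::fill_n(dst, count, hole);                  // corresponds to rep stos
  }
}
```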
@ -970,13 +980,14 @@ static void ArrayNativeCode(MacroAssembler* masm,
AllocateJSArray(masm,
edi,
ecx,
eax,
ebx,
eax,
edx,
edi,
true,
&prepare_generic_code_call);
__ IncrementCounter(&Counters::array_function_native, 1);
__ mov(eax, ebx);
__ pop(ebx);
if (construct_call) {
__ pop(edi);
@ -1067,7 +1078,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
Label generic_array_code;
// Get the Array function.
GenerateLoadArrayFunction(masm, edi);
@ -1247,3 +1258,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

263
deps/v8/src/ia32/codegen-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
@ -2979,6 +2981,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Record the position for debugging purposes.
@ -4227,8 +4230,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the i'th entry of the array.
__ mov(edx, frame_->ElementAt(2));
__ mov(ebx, Operand(edx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(ebx, FixedArrayElementOperand(edx, eax));
// Get the expected map from the stack or a zero map in the
// permanent slow case eax: current iteration count ebx: i'th entry
@ -4724,43 +4726,14 @@ Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
JumpTarget slow;
JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
// If there was no control flow to slow, we can exit early.
if (!slow.is_linked()) return result;
done.Jump(&result);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
// value.
result = allocator()->Allocate();
ASSERT(result.is_valid());
__ mov(result.reg(),
ContextSlotOperandCheckExtensions(potential_slot,
result,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
__ cmp(result.reg(), Factory::the_hole_value());
done.Branch(not_equal, &result);
__ mov(result.reg(), Factory::undefined_value());
}
// There is always control flow to slow from
// ContextSlotOperandCheckExtensions so we have to jump around
// it.
done.Jump(&result);
}
}
// Generate fast case for loading from slots that correspond to
// local/global variables or arguments unless they are shadowed by
// eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(slot,
typeof_state,
&result,
&slow,
&done);
slow.Bind();
// A runtime call is inevitable. We eagerly sync frame elements
@ -4929,6 +4902,68 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
}
void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
TypeofState typeof_state,
Result* result,
JumpTarget* slow,
JumpTarget* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
*result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
done->Jump(result);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots.
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
// value.
*result = allocator()->Allocate();
ASSERT(result->is_valid());
__ mov(result->reg(),
ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
if (potential_slot->var()->mode() == Variable::CONST) {
__ cmp(result->reg(), Factory::the_hole_value());
done->Branch(not_equal, result);
__ mov(result->reg(), Factory::undefined_value());
}
done->Jump(result);
} else if (rewrite != NULL) {
// Generate fast case for calls of an argument function.
Property* property = rewrite->AsProperty();
if (property != NULL) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
Literal* key_literal = property->key()->AsLiteral();
if (obj_proxy != NULL &&
key_literal != NULL &&
obj_proxy->IsArguments() &&
key_literal->handle()->IsSmi()) {
// Load arguments object if there are no eval-introduced
// variables. Then load the argument from the arguments
// object using keyed load.
Result arguments = allocator()->Allocate();
ASSERT(arguments.is_valid());
__ mov(arguments.reg(),
ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
arguments,
slow));
frame_->Push(&arguments);
frame_->Push(key_literal->handle());
*result = EmitKeyedLoad();
done->Jump(result);
}
}
}
}
}
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
@ -5698,6 +5733,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Prepare the stack for the call to ResolvePossiblyDirectEval.
@ -5747,6 +5783,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Push the name of the function onto the frame.
@ -5765,59 +5802,26 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// JavaScript examples:
//
// with (obj) foo(1, 2, 3) // foo is in obj
// with (obj) foo(1, 2, 3) // foo may be in obj.
//
// function f() {};
// function g() {
// eval(...);
// f(); // f could be in extension object
// f(); // f could be in extension object.
// }
// ----------------------------------
JumpTarget slow;
JumpTarget done;
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
JumpTarget slow, done;
Result function;
if (var->mode() == Variable::DYNAMIC_GLOBAL) {
function = LoadFromGlobalSlotCheckExtensions(var->slot(),
NOT_INSIDE_TYPEOF,
&slow);
frame_->Push(&function);
LoadGlobalReceiver();
done.Jump();
} else if (var->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = var->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads because eval forces arguments
// access to be through the arguments object.
if (potential_slot != NULL) {
// Allocate a fresh register to use as a temp in
// ContextSlotOperandCheckExtensions and to hold the result
// value.
function = allocator()->Allocate();
ASSERT(function.is_valid());
__ mov(function.reg(),
ContextSlotOperandCheckExtensions(potential_slot,
function,
&slow));
JumpTarget push_function_and_receiver;
if (potential_slot->var()->mode() == Variable::CONST) {
__ cmp(function.reg(), Factory::the_hole_value());
push_function_and_receiver.Branch(not_equal, &function);
__ mov(function.reg(), Factory::undefined_value());
}
push_function_and_receiver.Bind(&function);
frame_->Push(&function);
LoadGlobalReceiver();
done.Jump();
}
}
// Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(var->slot(),
NOT_INSIDE_TYPEOF,
&function,
&slow,
&done);
slow.Bind();
// Enter the runtime system to load the function from the context.
@ -5839,7 +5843,18 @@ void CodeGenerator::VisitCall(Call* node) {
ASSERT(!allocator()->is_used(edx));
frame_->EmitPush(edx);
done.Bind();
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
JumpTarget call;
call.Jump();
done.Bind(&function);
frame_->Push(&function);
LoadGlobalReceiver();
call.Bind();
}
// Call the function.
CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@ -5874,6 +5889,7 @@ void CodeGenerator::VisitCall(Call* node) {
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
frame_->SpillTop();
}
// Push the name of the function onto the frame.
@ -6149,11 +6165,11 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
__ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(less);
destination()->false_target()->Branch(below);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
obj.Unuse();
map.Unuse();
destination()->Split(less_equal);
destination()->Split(below_equal);
}
@ -6266,7 +6282,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
__ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
__ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
null.Branch(less);
null.Branch(below);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@ -6634,16 +6650,6 @@ class DeferredSearchCache: public DeferredCode {
};
// Return a position of the element at |index_as_smi| + |additional_offset|
// in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi.
static Operand ArrayElement(Register array,
Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
void DeferredSearchCache::Generate() {
Label first_loop, search_further, second_loop, cache_miss;
@ -6660,11 +6666,11 @@ void DeferredSearchCache::Generate() {
__ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
__ j(less, &search_further);
__ cmp(key_, ArrayElement(cache_, dst_));
__ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
__ j(not_equal, &first_loop);
__ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ mov(dst_, ArrayElement(cache_, dst_, 1));
__ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&search_further);
@ -6678,11 +6684,11 @@ void DeferredSearchCache::Generate() {
__ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
__ j(less_equal, &cache_miss);
__ cmp(key_, ArrayElement(cache_, dst_));
__ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
__ j(not_equal, &second_loop);
__ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ mov(dst_, ArrayElement(cache_, dst_, 1));
__ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
__ jmp(exit_label());
__ bind(&cache_miss);
@ -6730,7 +6736,7 @@ void DeferredSearchCache::Generate() {
__ pop(ebx); // restore the key
__ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
// Store key.
__ mov(ArrayElement(ecx, edx), ebx);
__ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
__ RecordWrite(ecx, 0, ebx, edx);
// Store value.
@ -6738,7 +6744,7 @@ void DeferredSearchCache::Generate() {
__ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
__ add(Operand(edx), Immediate(Smi::FromInt(1)));
__ mov(ebx, eax);
__ mov(ArrayElement(ecx, edx), ebx);
__ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
__ RecordWrite(ecx, 0, ebx, edx);
if (!dst_.is(eax)) {
@ -6785,11 +6791,11 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
// tmp.reg() now holds finger offset as a smi.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(tmp.reg(), FieldOperand(cache.reg(),
JSFunctionResultCache::kFingerOffset));
__ cmp(key.reg(), ArrayElement(cache.reg(), tmp.reg()));
JSFunctionResultCache::kFingerOffset));
__ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
deferred->Branch(not_equal);
__ mov(tmp.reg(), ArrayElement(cache.reg(), tmp.reg(), 1));
__ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
deferred->BindExit();
frame_->Push(&tmp);
@ -6866,7 +6872,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
deferred->Branch(less);
deferred->Branch(below);
__ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
__ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(not_zero);
@ -6888,14 +6894,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
deferred->Branch(not_zero);
// Bring addresses into index1 and index2.
__ lea(index1.reg(), FieldOperand(tmp1.reg(),
index1.reg(),
times_half_pointer_size, // index1 is Smi
FixedArray::kHeaderSize));
__ lea(index2.reg(), FieldOperand(tmp1.reg(),
index2.reg(),
times_half_pointer_size, // index2 is Smi
FixedArray::kHeaderSize));
__ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
__ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
// Swap elements.
__ mov(object.reg(), Operand(index1.reg(), 0));
@ -8192,11 +8192,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
__ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(less);
destination()->false_target()->Branch(below);
__ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
answer.Unuse();
map.Unuse();
destination()->Split(less_equal);
destination()->Split(below_equal);
} else {
// Uncommon case: typeof testing against a string literal that is
// never returned from the typeof operator.
@ -8768,11 +8768,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
deferred->Branch(not_equal);
// Store the value.
__ mov(Operand(tmp.reg(),
key.reg(),
times_2,
FixedArray::kHeaderSize - kHeapObjectTag),
result.reg());
__ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
deferred->BindExit();
@ -9074,7 +9070,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ mov(ecx, Operand(esp, 3 * kPointerSize));
__ mov(eax, Operand(esp, 2 * kPointerSize));
ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
__ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
__ cmp(ecx, Factory::undefined_value());
__ j(equal, &slow_case);
@ -10296,6 +10292,11 @@ void IntegerConvert(MacroAssembler* masm,
Label done, right_exponent, normal_exponent;
Register scratch = ebx;
Register scratch2 = edi;
if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
CpuFeatures::Scope scope(SSE2);
__ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
return;
}
if (!type_info.IsInteger32() || !use_sse3) {
// Get exponent word.
__ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
@ -11601,7 +11602,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
Label first_non_object;
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &first_non_object);
__ j(below, &first_non_object);
// Return non-zero (eax is not zero)
Label return_not_equal;
@ -11618,7 +11619,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(greater_equal, &return_not_equal);
__ j(above_equal, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
__ cmp(ecx, ODDBALL_TYPE);
@ -12266,9 +12267,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
__ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &slow, not_taken);
__ j(below, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(greater, &slow, not_taken);
__ j(above, &slow, not_taken);
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
@ -12296,9 +12297,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ cmp(ecx, FIRST_JS_OBJECT_TYPE);
__ j(less, &slow, not_taken);
__ j(below, &slow, not_taken);
__ cmp(ecx, LAST_JS_OBJECT_TYPE);
__ j(greater, &slow, not_taken);
__ j(above, &slow, not_taken);
// Register mapping:
// eax is object map.
@ -13296,3 +13297,5 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

21
deps/v8/src/ia32/codegen-ia32.h

@ -28,7 +28,9 @@
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
#include "ast.h"
#include "ic-inl.h"
#include "jump-target-heavy.h"
namespace v8 {
namespace internal {
@ -343,6 +345,15 @@ class CodeGenerator: public AstVisitor {
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Return the operand for the element at |index_as_smi| + |additional_offset|
// in the FixedArray whose pointer is held in |array|. |index_as_smi| is a Smi.
static Operand FixedArrayElementOperand(Register array,
Register index_as_smi,
int additional_offset = 0) {
int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
private:
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
@ -454,6 +465,16 @@ class CodeGenerator: public AstVisitor {
TypeofState typeof_state,
JumpTarget* slow);
// Support for loading from local/global variables and arguments
// whose location is known unless they are shadowed by
// eval-introduced bindings. Generates no code for unsupported slot
// types and therefore expects to fall through to the slow jump target.
void EmitDynamicLoadFromSlotFastCase(Slot* slot,
TypeofState typeof_state,
Result* result,
JumpTarget* slow,
JumpTarget* done);
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
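
The new FixedArrayElementOperand helper works because ia32 Smis carry the integer shifted left by one: scaling the tagged index by times_half_pointer_size (a factor of 2) therefore produces index * kPointerSize without untagging. A small stand-alone check of that arithmetic, using plain integers; the header-size constant is a stand-in for FixedArray::kHeaderSize:

```cpp
#include <cassert>

int main() {
  const int kPointerSize = 4;        // ia32
  const int kHeaderSize = 8;         // stand-in for FixedArray::kHeaderSize
  for (int index = 0; index < 1000; index++) {
    int index_as_smi = index << 1;   // Smi encoding: value << kSmiTagSize
    int offset = kHeaderSize + index_as_smi * 2;  // scale = times_half_pointer_size
    assert(offset == kHeaderSize + index * kPointerSize);
  }
  return 0;
}
```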

4
deps/v8/src/ia32/cpu-ia32.cc

@ -33,6 +33,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "cpu.h"
#include "macro-assembler.h"
@ -77,3 +79,5 @@ void CPU::DebugBreak() {
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/debug-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "debug.h"
@ -261,3 +263,5 @@ const int Debug::kFrameDropperFrameSize = 5;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

6
deps/v8/src/ia32/disasm-ia32.cc

@ -30,6 +30,9 @@
#include <stdarg.h>
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "disasm.h"
namespace disasm {
@ -90,6 +93,7 @@ static ByteMnemonic zero_operands_instr[] = {
{0x99, "cdq", UNSET_OP_ORDER},
{0x9B, "fwait", UNSET_OP_ORDER},
{0xFC, "cld", UNSET_OP_ORDER},
{0xAB, "stos", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
};
@ -1438,3 +1442,5 @@ int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
} // namespace disasm
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/fast-codegen-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "fast-codegen.h"
#include "data-flow.h"
@ -948,3 +950,5 @@ void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/frames-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "frames-inl.h"
namespace v8 {
@ -109,3 +111,5 @@ Address InternalFrame::GetCallerStackPointer() const {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

1493
deps/v8/src/ia32/full-codegen-ia32.cc

File diff suppressed because it is too large

6
deps/v8/src/ia32/ic-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "ic-inl.h"
#include "runtime.h"
@ -868,7 +870,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ecx: key (a smi)
// edx: receiver
// edi: FixedArray receiver->elements
__ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
__ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
// Update write barrier for the elements array address.
__ mov(edx, Operand(eax));
__ RecordWrite(edi, 0, edx, ecx);
@ -1643,3 +1645,5 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/jump-target-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
@ -431,3 +433,5 @@ void BreakTarget::Bind(Result* arg) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
@ -1706,3 +1708,5 @@ CodePatcher::~CodePatcher() {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

91
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "unicode.h"
#include "log.h"
#include "ast.h"
@ -51,7 +54,7 @@ namespace internal {
* - esp : points to tip of C stack.
* - ecx : points to tip of backtrack stack
*
* The registers eax, ebx and ecx are free to use for computations.
* The registers eax and ebx are free to use for computations.
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
@ -72,8 +75,6 @@ namespace internal {
* - backup of caller ebx
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
* - Boolean at start (if 1, we are starting at the start of the string,
* otherwise 0)
* - register 0 ebp[-4] (Only positions must be stored in the first
* - register 1 ebp[-8] num_saved_registers_ registers)
* - ...
@ -178,8 +179,8 @@ void RegExpMacroAssemblerIA32::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ cmp(Operand(ebp, kAtStart), Immediate(0));
BranchOrBacktrack(equal, &not_at_start);
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(eax, Operand(esi, edi, times_1, 0));
__ cmp(eax, Operand(ebp, kInputStart));
@ -190,8 +191,8 @@ void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ cmp(Operand(ebp, kAtStart), Immediate(0));
BranchOrBacktrack(equal, on_not_at_start);
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(eax, Operand(esi, edi, times_1, 0));
__ cmp(eax, Operand(ebp, kInputStart));
@ -209,6 +210,15 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
#ifdef DEBUG
// If input is ASCII, don't even bother calling here if the string to
// match contains a non-ascii character.
if (mode_ == ASCII) {
for (int i = 0; i < str.length(); i++) {
ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
}
}
#endif
int byte_length = str.length() * char_size();
int byte_offset = cp_offset * char_size();
if (check_end_of_string) {
@ -222,14 +232,56 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
on_failure = &backtrack_label_;
}
for (int i = 0; i < str.length(); i++) {
// Do one character test first to minimize loading for the case that
// we don't match at all (loading more than one character introduces that
// chance of reading unaligned and reading across cache boundaries).
// If the first character matches, expect a larger chance of matching the
// string, and start loading more characters at a time.
if (mode_ == ASCII) {
__ cmpb(Operand(esi, edi, times_1, byte_offset),
static_cast<int8_t>(str[0]));
} else {
// Don't use 16-bit immediate. The size changing prefix throws off
// pre-decoding.
__ movzx_w(eax,
Operand(esi, edi, times_1, byte_offset));
__ cmp(eax, static_cast<int32_t>(str[0]));
}
BranchOrBacktrack(not_equal, on_failure);
__ lea(ebx, Operand(esi, edi, times_1, 0));
for (int i = 1, n = str.length(); i < n;) {
if (mode_ == ASCII) {
__ cmpb(Operand(esi, edi, times_1, byte_offset + i),
static_cast<int8_t>(str[i]));
if (i <= n - 4) {
int combined_chars =
(static_cast<uint32_t>(str[i + 0]) << 0) |
(static_cast<uint32_t>(str[i + 1]) << 8) |
(static_cast<uint32_t>(str[i + 2]) << 16) |
(static_cast<uint32_t>(str[i + 3]) << 24);
__ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
i += 4;
} else {
__ cmpb(Operand(ebx, byte_offset + i),
static_cast<int8_t>(str[i]));
i += 1;
}
} else {
ASSERT(mode_ == UC16);
__ cmpw(Operand(esi, edi, times_1, byte_offset + i * sizeof(uc16)),
Immediate(str[i]));
if (i <= n - 2) {
__ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
Immediate(*reinterpret_cast<const int*>(&str[i])));
i += 2;
} else {
// Avoid a 16-bit immediate operation. It uses the length-changing
// 0x66 prefix which causes pre-decoder misprediction and pipeline
// stalls. See
// "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
// (248966.pdf) section 3.4.2.3 "Length-Changing Prefixes (LCP)"
__ movzx_w(eax,
Operand(ebx, byte_offset + i * sizeof(uc16)));
__ cmp(eax, static_cast<int32_t>(str[i]));
i += 1;
}
}
BranchOrBacktrack(not_equal, on_failure);
}
@ -625,7 +677,6 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(edi);
__ push(ebx); // Callee-save on MacOS.
__ push(Immediate(0)); // Make room for "input start - 1" constant.
__ push(Immediate(0)); // Make room for "at start" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@ -677,14 +728,6 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
// Register ebx still holds -stringIndex.
__ test(ebx, Operand(ebx));
__ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
__ mov(Operand(ebp, kAtStart), ecx);
if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
// Fill saved registers with initial value = start offset - 1
// Fill in stack push order, to avoid accessing across an unwritten
@ -712,8 +755,8 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
// Load previous char as initial value of current-character.
Label at_start;
__ cmp(Operand(ebp, kAtStart), Immediate(0));
__ j(not_equal, &at_start);
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
__ j(equal, &at_start);
LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
__ jmp(&start_label_);
__ bind(&at_start);
@ -1201,3 +1244,5 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
#endif // V8_INTERPRETED_REGEXP
}} // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
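The rewritten CheckCharacters body above tests one character first and, only when it matches, compares the remaining pattern four ASCII characters (or two UC16 characters) per instruction, using 32-bit immediates so the generated code avoids the length-changing 0x66 prefix. Below is a minimal, self-contained C++ sketch of the same packing strategy written against plain memory rather than the macro assembler; MatchAscii and its signature are assumptions made for this illustration, not V8 code.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Returns true if input + offset starts with the ASCII pattern.
static bool MatchAscii(const char* input, size_t offset,
                       const char* pattern, size_t length) {
  const char* subject = input + offset;
  if (length == 0) return true;
  // One-character test first: the cheapest way to reject a non-matching
  // position without touching more memory than necessary.
  if (subject[0] != pattern[0]) return false;
  size_t i = 1;
  // Compare four characters per step, mirroring the 32-bit immediate
  // compares the macro assembler emits once the first character matched.
  for (; i + 4 <= length; i += 4) {
    uint32_t a, b;
    std::memcpy(&a, subject + i, sizeof(a));  // unaligned-safe loads
    std::memcpy(&b, pattern + i, sizeof(b));
    if (a != b) return false;
  }
  // Finish the (at most three) remaining characters one at a time.
  for (; i < length; i++) {
    if (subject[i] != pattern[i]) return false;
  }
  return true;
}

int main() {
  const char* text = "regexp literal matching";
  std::printf("%d\n", MatchAscii(text, 7, "literal", 7));  // prints 1
}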

3
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -132,9 +132,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kBackup_edi = kBackup_esi - kPointerSize;
static const int kBackup_ebx = kBackup_edi - kPointerSize;
static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
static const int kAtStart = kInputStartMinusOne - kPointerSize;
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kAtStart - kPointerSize;
static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
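With the kAtStart frame slot removed here, the generated code answers "are we at the start?" from the start index and the current position alone, as the updated CheckAtStart/CheckNotAtStart hunks above show. A tiny hedged sketch of that equivalence in plain C++; AtStart is a made-up helper for illustration, not V8 API.

#include <cstdio>

// Hypothetical helper: true only when the match began at index 0 and the
// current position has not advanced past the start of the input.
static bool AtStart(int start_index, const char* current_position,
                    const char* input_start) {
  return start_index == 0 && current_position == input_start;
}

int main() {
  const char* input = "abc";
  std::printf("%d\n", AtStart(0, input, input));      // 1: at the start
  std::printf("%d\n", AtStart(1, input + 1, input));  // 0: match began later
}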

4
deps/v8/src/ia32/register-allocator-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"
@ -151,3 +153,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/stub-cache-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"
@ -2387,3 +2389,5 @@ Object* ConstructStubCompiler::CompileConstructStub(
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

4
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
@ -1310,3 +1312,5 @@ void VirtualFrame::Push(Expression* expr) {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

40
deps/v8/src/ia32/virtual-frame-ia32.h

@ -28,9 +28,10 @@
#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
#define V8_IA32_VIRTUAL_FRAME_IA32_H_
#include "type-info.h"
#include "codegen.h"
#include "register-allocator.h"
#include "scopes.h"
#include "type-info.h"
namespace v8 {
namespace internal {
@ -97,23 +98,16 @@ class VirtualFrame: public ZoneObject {
return register_locations_[num];
}
int register_location(Register reg) {
return register_locations_[RegisterAllocator::ToNumber(reg)];
}
inline int register_location(Register reg);
void set_register_location(Register reg, int index) {
register_locations_[RegisterAllocator::ToNumber(reg)] = index;
}
inline void set_register_location(Register reg, int index);
bool is_used(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
return register_locations_[RegisterAllocator::ToNumber(reg)]
!= kIllegalIndex;
}
inline bool is_used(Register reg);
// Add extra in-memory elements to the top of the frame to match an actual
// frame (eg, the frame after an exception handler is pushed). No code is
@ -150,6 +144,9 @@ class VirtualFrame: public ZoneObject {
// (ie, they all have frame-external references).
Register SpillAnyRegister();
// Spill the top element of the frame.
void SpillTop() { SpillElementAt(element_count() - 1); }
// Sync the range of elements in [begin, end] with memory.
void SyncRange(int begin, int end);
@ -217,10 +214,7 @@ class VirtualFrame: public ZoneObject {
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value);
SetElementAt(index, &temp);
}
inline void SetElementAt(int index, Handle<Object> value);
void PushElementAt(int index) {
PushFrameSlotAt(element_count() - index - 1);
@ -315,10 +309,7 @@ class VirtualFrame: public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
Result CallStub(CodeStub* stub, int arg_count) {
PrepareForCall(arg_count, arg_count);
return RawCallStub(stub);
}
inline Result CallStub(CodeStub* stub, int arg_count);
// Call stub that takes a single argument passed in eax. The
// argument is given as a result which does not have to be eax or
@ -361,7 +352,7 @@ class VirtualFrame: public ZoneObject {
Result CallStoreIC(Handle<String> name, bool is_contextual);
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. Key and receiver are not dropped.
// of the frame. All three are dropped.
Result CallKeyedStoreIC();
// Call call IC. Function name, arguments, and receiver are found on top
@ -473,12 +464,9 @@ class VirtualFrame: public ZoneObject {
int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively.
int parameter_count() {
return cgen()->scope()->num_parameters();
}
int local_count() {
return cgen()->scope()->num_stack_slots();
}
inline int parameter_count();
inline int local_count();
// The index of the element that is at the processor's frame pointer
// (the ebp register). The parameters, receiver, and return address

63
deps/v8/src/jump-target-heavy.cc

@ -35,6 +35,9 @@ namespace v8 {
namespace internal {
bool JumpTarget::compiling_deferred_code_ = false;
void JumpTarget::Jump(Result* arg) {
ASSERT(cgen()->has_valid_frame());
@ -360,4 +363,64 @@ DeferredCode::DeferredCode()
}
}
void JumpTarget::Unuse() {
reaching_frames_.Clear();
merge_labels_.Clear();
entry_frame_ = NULL;
entry_label_.Unuse();
}
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL);
Label fresh;
merge_labels_.Add(fresh);
reaching_frames_.Add(frame);
}
// -------------------------------------------------------------------------
// BreakTarget implementation.
void BreakTarget::set_direction(Directionality direction) {
JumpTarget::set_direction(direction);
ASSERT(cgen()->has_valid_frame());
expected_height_ = cgen()->frame()->height();
}
void BreakTarget::CopyTo(BreakTarget* destination) {
ASSERT(destination != NULL);
destination->direction_ = direction_;
destination->reaching_frames_.Rewind(0);
destination->reaching_frames_.AddAll(reaching_frames_);
destination->merge_labels_.Rewind(0);
destination->merge_labels_.AddAll(merge_labels_);
destination->entry_frame_ = entry_frame_;
destination->entry_label_ = entry_label_;
destination->expected_height_ = expected_height_;
}
void BreakTarget::Branch(Condition cc, Hint hint) {
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
Jump(); // May emit merge code here.
fall_through.Bind();
} else {
DoBranch(cc, hint);
}
}
} } // namespace v8::internal

242
deps/v8/src/jump-target-heavy.h

@ -0,0 +1,242 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_JUMP_TARGET_HEAVY_H_
#define V8_JUMP_TARGET_HEAVY_H_
#include "macro-assembler.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
// Forward declarations.
class FrameElement;
class Result;
class VirtualFrame;
// -------------------------------------------------------------------------
// Jump targets
//
// A jump target is an abstraction of a basic-block entry in generated
// code. It collects all the virtual frames reaching the block by
// forward jumps and pairs them with labels for the merge code along
// all forward-reaching paths. When bound, an expected frame for the
// block is determined and code is generated to merge to the expected
// frame. For backward jumps, the merge code is generated at the edge
// leaving the predecessor block.
//
// A jump target must have been reached via control flow (either by
// jumping, branching, or falling through) at the time it is bound.
// In particular, this means that at least one of the control-flow
// graph edges reaching the target must be a forward edge.
class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
public:
// Forward-only jump targets can only be reached by forward CFG edges.
enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
// Construct a jump target used to generate code and to provide
// access to a current frame.
explicit JumpTarget(Directionality direction)
: direction_(direction),
reaching_frames_(0),
merge_labels_(0),
entry_frame_(NULL) {
}
// Construct a jump target.
JumpTarget()
: direction_(FORWARD_ONLY),
reaching_frames_(0),
merge_labels_(0),
entry_frame_(NULL) {
}
virtual ~JumpTarget() {}
// Set the direction of the jump target.
virtual void set_direction(Directionality direction) {
direction_ = direction;
}
// Treat the jump target as a fresh one. The state is reset.
void Unuse();
inline CodeGenerator* cgen();
Label* entry_label() { return &entry_label_; }
VirtualFrame* entry_frame() const { return entry_frame_; }
void set_entry_frame(VirtualFrame* frame) {
entry_frame_ = frame;
}
// Predicates testing the state of the encapsulated label.
bool is_bound() const { return entry_label_.is_bound(); }
bool is_linked() const {
return !is_bound() && !reaching_frames_.is_empty();
}
bool is_unused() const {
// This is !is_bound() && !is_linked().
return !is_bound() && reaching_frames_.is_empty();
}
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
virtual void Jump();
virtual void Jump(Result* arg);
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch. The arg is a result that is live both at
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
virtual void Branch(Condition cc,
Result* arg0,
Result* arg1,
Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
virtual void Bind(Result* arg0, Result* arg1);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
// frame except for an extra return address on top of it. The frame
// after the call is the same as the frame before the call.
void Call();
static void set_compiling_deferred_code(bool flag) {
compiling_deferred_code_ = flag;
}
protected:
// Directionality flag set at initialization time.
Directionality direction_;
// A list of frames reaching this block via forward jumps.
ZoneList<VirtualFrame*> reaching_frames_;
// A parallel list of labels for merge code.
ZoneList<Label> merge_labels_;
// The frame used on entry to the block and expected at backward
// jumps to the block. Set when the jump target is bound, but may
// or may not be set for forward-only blocks.
VirtualFrame* entry_frame_;
// The actual entry label of the block.
Label entry_label_;
// Implementations of Jump, Branch, and Bind with all arguments and
// return values using the virtual frame.
void DoJump();
void DoBranch(Condition cc, Hint hint);
void DoBind();
private:
static bool compiling_deferred_code_;
// Add a virtual frame reaching this labeled block via a forward jump,
// and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame);
// Perform initialization required during entry frame computation
// after setting the virtual frame element at index in frame to be
// target.
inline void InitializeEntryElement(int index, FrameElement* target);
// Compute a frame to use for entry to this block.
void ComputeEntryFrame();
DISALLOW_COPY_AND_ASSIGN(JumpTarget);
};
// -------------------------------------------------------------------------
// Break targets
//
// A break target is a jump target that can be used to break out of a
// statement that keeps extra state on the stack (eg, for/in or
// try/finally). They know the expected stack height at the target
// and will drop state from nested statements as part of merging.
//
// Break targets are used for return, break, and continue targets.
class BreakTarget : public JumpTarget {
public:
// Construct a break target.
BreakTarget() {}
virtual ~BreakTarget() {}
// Set the direction of the break target.
virtual void set_direction(Directionality direction);
// Copy the state of this break target to the destination. The
// lists of forward-reaching frames and merge-point labels are
// copied. All virtual frame pointers are copied, not the
// pointed-to frames. The previous state of the destination is
// overwritten, without deallocating pointed-to virtual frames.
void CopyTo(BreakTarget* destination);
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
virtual void Jump();
virtual void Jump(Result* arg);
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
// Bind a break target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
// Setter for expected height.
void set_expected_height(int expected) { expected_height_ = expected; }
private:
// The expected height of the expression stack where the target will
// be bound, statically known at initialization time.
int expected_height_;
DISALLOW_COPY_AND_ASSIGN(BreakTarget);
};
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_HEAVY_H_
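The comments above describe the "heavy" jump-target model: every forward jump to an unbound target records the virtual frame it arrives with, paired with a fresh label where merge code is emitted when the target is finally bound. A minimal stand-alone sketch of that bookkeeping follows; FakeFrame, FakeLabel, and HeavyTarget are stand-ins invented for this example and are not V8 types.

#include <cstdio>
#include <vector>

struct FakeFrame { int height; };
struct FakeLabel { bool bound = false; };

class HeavyTarget {
 public:
  // Called for each forward jump/branch that reaches the unbound target,
  // mirroring JumpTarget::AddReachingFrame above.
  void AddReachingFrame(FakeFrame frame) {
    reaching_frames_.push_back(frame);
    merge_labels_.push_back(FakeLabel());
  }
  // Called when the target is bound: pick an expected entry frame and
  // (conceptually) emit merge code at each recorded label.
  void Bind() {
    int expected = reaching_frames_.empty() ? 0 : reaching_frames_[0].height;
    for (size_t i = 0; i < reaching_frames_.size(); i++) {
      std::printf("merge frame of height %d to expected height %d\n",
                  reaching_frames_[i].height, expected);
      merge_labels_[i].bound = true;
    }
  }
 private:
  std::vector<FakeFrame> reaching_frames_;
  std::vector<FakeLabel> merge_labels_;
};

int main() {
  HeavyTarget target;
  target.AddReachingFrame({3});  // first forward jump
  target.AddReachingFrame({3});  // second forward jump
  target.Bind();                 // merge code for both reaching frames
}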

14
deps/v8/src/jump-target-light-inl.h

@ -33,10 +33,20 @@
namespace v8 {
namespace internal {
void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
UNIMPLEMENTED();
// Construct a jump target.
JumpTarget::JumpTarget(Directionality direction)
: entry_frame_set_(false),
entry_frame_(kInvalidVirtualFrameInitializer) {
}
JumpTarget::JumpTarget()
: entry_frame_set_(false),
entry_frame_(kInvalidVirtualFrameInitializer) {
}
BreakTarget::BreakTarget() { }
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_LIGHT_INL_H_

83
deps/v8/src/jump-target-light.cc

@ -34,53 +34,76 @@ namespace v8 {
namespace internal {
void JumpTarget::Jump(Result* arg) {
UNIMPLEMENTED();
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
UNIMPLEMENTED();
#ifdef DEBUG
CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
#endif
}
void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
UNIMPLEMENTED();
}
// -------------------------------------------------------------------------
// BreakTarget implementation.
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
UNIMPLEMENTED();
void BreakTarget::SetExpectedHeight() {
expected_height_ = cgen()->frame()->height();
}
void JumpTarget::Bind(Result* arg) {
UNIMPLEMENTED();
}
void BreakTarget::Jump() {
ASSERT(cgen()->has_valid_frame());
void JumpTarget::Bind(Result* arg0, Result* arg1) {
UNIMPLEMENTED();
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
cgen()->frame()->Drop(count);
}
DoJump();
}
void JumpTarget::ComputeEntryFrame() {
UNIMPLEMENTED();
void BreakTarget::Branch(Condition cc, Hint hint) {
if (cc == al) {
Jump();
return;
}
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
// Emit merge code.
cgen()->frame()->Drop(count);
DoJump();
fall_through.Bind();
} else {
DoBranch(cc, hint);
}
}
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
#endif
void BreakTarget::Bind() {
if (cgen()->has_valid_frame()) {
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
cgen()->frame()->Drop(count);
}
}
DoBind();
}
} } // namespace v8::internal
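BreakTarget::Jump and BreakTarget::Branch above both compute count = current frame height minus expected_height_ and drop that many elements before transferring control, so state pushed by nested statements (for/in slots, try/finally bookkeeping) does not leak past the break. The self-contained sketch below shows just that height bookkeeping; SimpleFrame and SimpleBreakTarget are invented for this example, not V8 classes.

#include <cstdio>
#include <vector>

struct SimpleFrame {
  std::vector<int> elements;
  int height() const { return static_cast<int>(elements.size()); }
  void Push(int v) { elements.push_back(v); }
  void Drop(int count) { elements.resize(elements.size() - count); }
};

class SimpleBreakTarget {
 public:
  // Record the frame height at the point where the target is set up.
  explicit SimpleBreakTarget(SimpleFrame* frame)
      : frame_(frame), expected_height_(frame->height()) {}
  // Mirror of BreakTarget::Jump(): trim the frame back to the expected
  // height before the (here, simulated) jump.
  void Jump() {
    int count = frame_->height() - expected_height_;
    if (count > 0) frame_->Drop(count);
    std::printf("jump with frame height %d\n", frame_->height());
  }
 private:
  SimpleFrame* frame_;
  int expected_height_;
};

int main() {
  SimpleFrame frame;
  frame.Push(1);                      // e.g. a slot owned by the loop itself
  SimpleBreakTarget break_target(&frame);
  frame.Push(2);                      // extra state from nested statements
  frame.Push(3);
  break_target.Jump();                // drops back to height 1 before jumping
}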

187
deps/v8/src/jump-target-light.h

@ -0,0 +1,187 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_JUMP_TARGET_LIGHT_H_
#define V8_JUMP_TARGET_LIGHT_H_
#include "macro-assembler.h"
#include "zone-inl.h"
#include "virtual-frame.h"
namespace v8 {
namespace internal {
// Forward declarations.
class FrameElement;
class Result;
// -------------------------------------------------------------------------
// Jump targets
//
// A jump target is an abstraction of a basic-block entry in generated
// code. It collects all the virtual frames reaching the block by
// forward jumps and pairs them with labels for the merge code along
// all forward-reaching paths. When bound, an expected frame for the
// block is determined and code is generated to merge to the expected
// frame. For backward jumps, the merge code is generated at the edge
// leaving the predecessor block.
//
// A jump target must have been reached via control flow (either by
// jumping, branching, or falling through) at the time it is bound.
// In particular, this means that at least one of the control-flow
// graph edges reaching the target must be a forward edge.
class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
public:
// Forward-only jump targets can only be reached by forward CFG edges.
enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
// Construct a jump target.
explicit inline JumpTarget(Directionality direction);
inline JumpTarget();
virtual ~JumpTarget() {}
void Unuse() {
entry_frame_set_ = false;
entry_label_.Unuse();
}
inline CodeGenerator* cgen();
const VirtualFrame* entry_frame() const {
return entry_frame_set_ ? &entry_frame_ : NULL;
}
void set_entry_frame(VirtualFrame* frame) {
entry_frame_ = *frame;
entry_frame_set_ = true;
}
// Predicates testing the state of the encapsulated label.
bool is_bound() const { return entry_label_.is_bound(); }
bool is_linked() const { return entry_label_.is_linked(); }
bool is_unused() const { return entry_label_.is_unused(); }
// Copy the state of this jump target to the destination.
inline void CopyTo(JumpTarget* destination) {
*destination = *this;
}
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
virtual void Jump();
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch. The arg is a result that is live both at
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
// frame except for an extra return address on top of it. The frame
// after the call is the same as the frame before the call.
void Call();
protected:
// Has an entry frame been found?
bool entry_frame_set_;
// The frame used on entry to the block and expected at backward
// jumps to the block. Set the first time something branches to this
// jump target.
VirtualFrame entry_frame_;
// The actual entry label of the block.
Label entry_label_;
// Implementations of Jump, Branch, and Bind with all arguments and
// return values using the virtual frame.
void DoJump();
void DoBranch(Condition cc, Hint hint);
void DoBind();
};
// -------------------------------------------------------------------------
// Break targets
//
// A break target is a jump target that can be used to break out of a
// statement that keeps extra state on the stack (eg, for/in or
// try/finally). They know the expected stack height at the target
// and will drop state from nested statements as part of merging.
//
// Break targets are used for return, break, and continue targets.
class BreakTarget : public JumpTarget {
public:
// Construct a break target.
inline BreakTarget();
virtual ~BreakTarget() {}
// Copy the state of this jump target to the destination.
inline void CopyTo(BreakTarget* destination) {
*destination = *this;
}
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
virtual void Jump();
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch.
virtual void Branch(Condition cc, Hint hint = no_hint);
// Bind a break target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
// Setter for expected height.
void set_expected_height(int expected) { expected_height_ = expected; }
// Uses the current frame to set the expected height.
void SetExpectedHeight();
private:
// The expected height of the expression stack where the target will
// be bound, statically known at initialization time.
int expected_height_;
};
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_LIGHT_H_

64
deps/v8/src/jump-target.cc

@ -37,17 +37,6 @@ namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
bool JumpTarget::compiling_deferred_code_ = false;
void JumpTarget::Unuse() {
reaching_frames_.Clear();
merge_labels_.Clear();
entry_frame_ = NULL;
entry_label_.Unuse();
}
void JumpTarget::Jump() {
DoJump();
}
@ -63,58 +52,6 @@ void JumpTarget::Bind() {
}
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL);
Label fresh;
merge_labels_.Add(fresh);
reaching_frames_.Add(frame);
}
// -------------------------------------------------------------------------
// BreakTarget implementation.
void BreakTarget::set_direction(Directionality direction) {
JumpTarget::set_direction(direction);
ASSERT(cgen()->has_valid_frame());
expected_height_ = cgen()->frame()->height();
}
void BreakTarget::CopyTo(BreakTarget* destination) {
ASSERT(destination != NULL);
destination->direction_ = direction_;
destination->reaching_frames_.Rewind(0);
destination->reaching_frames_.AddAll(reaching_frames_);
destination->merge_labels_.Rewind(0);
destination->merge_labels_.AddAll(merge_labels_);
destination->entry_frame_ = entry_frame_;
destination->entry_label_ = entry_label_;
destination->expected_height_ = expected_height_;
}
void BreakTarget::Branch(Condition cc, Hint hint) {
ASSERT(cgen()->has_valid_frame());
int count = cgen()->frame()->height() - expected_height_;
if (count > 0) {
// We negate and branch here rather than using DoBranch's negate
// and branch. This gives us a hook to remove statement state
// from the frame.
JumpTarget fall_through;
// Branch to fall through will not negate, because it is a
// forward-only target.
fall_through.Branch(NegateCondition(cc), NegateHint(hint));
Jump(); // May emit merge code here.
fall_through.Bind();
} else {
DoBranch(cc, hint);
}
}
// -------------------------------------------------------------------------
// ShadowTarget implementation.
@ -151,5 +88,4 @@ void ShadowTarget::StopShadowing() {
#endif
}
} } // namespace v8::internal

218
deps/v8/src/jump-target.h

@ -28,216 +28,21 @@
#ifndef V8_JUMP_TARGET_H_
#define V8_JUMP_TARGET_H_
#include "macro-assembler.h"
#include "zone-inl.h"
#if V8_TARGET_ARCH_IA32
#include "jump-target-heavy.h"
#elif V8_TARGET_ARCH_X64
#include "jump-target-heavy.h"
#elif V8_TARGET_ARCH_ARM
#include "jump-target-light.h"
#elif V8_TARGET_ARCH_MIPS
#include "jump-target-light.h"
#else
#error Unsupported target architecture.
#endif
namespace v8 {
namespace internal {
// Forward declarations.
class FrameElement;
class Result;
class VirtualFrame;
// -------------------------------------------------------------------------
// Jump targets
//
// A jump target is an abstraction of a basic-block entry in generated
// code. It collects all the virtual frames reaching the block by
// forward jumps and pairs them with labels for the merge code along
// all forward-reaching paths. When bound, an expected frame for the
// block is determined and code is generated to merge to the expected
// frame. For backward jumps, the merge code is generated at the edge
// leaving the predecessor block.
//
// A jump target must have been reached via control flow (either by
// jumping, branching, or falling through) at the time it is bound.
// In particular, this means that at least one of the control-flow
// graph edges reaching the target must be a forward edge.
class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
public:
// Forward-only jump targets can only be reached by forward CFG edges.
enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
// Construct a jump target used to generate code and to provide
// access to a current frame.
explicit JumpTarget(Directionality direction)
: direction_(direction),
reaching_frames_(0),
merge_labels_(0),
entry_frame_(NULL) {
}
// Construct a jump target.
JumpTarget()
: direction_(FORWARD_ONLY),
reaching_frames_(0),
merge_labels_(0),
entry_frame_(NULL) {
}
virtual ~JumpTarget() {}
// Set the direction of the jump target.
virtual void set_direction(Directionality direction) {
direction_ = direction;
}
// Treat the jump target as a fresh one. The state is reset.
void Unuse();
inline CodeGenerator* cgen();
Label* entry_label() { return &entry_label_; }
VirtualFrame* entry_frame() const { return entry_frame_; }
void set_entry_frame(VirtualFrame* frame) {
entry_frame_ = frame;
}
// Predicates testing the state of the encapsulated label.
bool is_bound() const { return entry_label_.is_bound(); }
bool is_linked() const {
return !is_bound() && !reaching_frames_.is_empty();
}
bool is_unused() const {
// This is !is_bound() && !is_linked().
return !is_bound() && reaching_frames_.is_empty();
}
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
virtual void Jump();
virtual void Jump(Result* arg);
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch. The arg is a result that is live both at
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
virtual void Branch(Condition cc,
Result* arg0,
Result* arg1,
Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
virtual void Bind(Result* arg0, Result* arg1);
// Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current
// frame except for an extra return address on top of it. The frame
// after the call is the same as the frame before the call.
void Call();
static void set_compiling_deferred_code(bool flag) {
compiling_deferred_code_ = flag;
}
protected:
// Directionality flag set at initialization time.
Directionality direction_;
// A list of frames reaching this block via forward jumps.
ZoneList<VirtualFrame*> reaching_frames_;
// A parallel list of labels for merge code.
ZoneList<Label> merge_labels_;
// The frame used on entry to the block and expected at backward
// jumps to the block. Set when the jump target is bound, but may
// or may not be set for forward-only blocks.
VirtualFrame* entry_frame_;
// The actual entry label of the block.
Label entry_label_;
// Implementations of Jump, Branch, and Bind with all arguments and
// return values using the virtual frame.
void DoJump();
void DoBranch(Condition cc, Hint hint);
void DoBind();
private:
static bool compiling_deferred_code_;
// Add a virtual frame reaching this labeled block via a forward jump,
// and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame);
// Perform initialization required during entry frame computation
// after setting the virtual frame element at index in frame to be
// target.
inline void InitializeEntryElement(int index, FrameElement* target);
// Compute a frame to use for entry to this block.
void ComputeEntryFrame();
DISALLOW_COPY_AND_ASSIGN(JumpTarget);
};
// -------------------------------------------------------------------------
// Break targets
//
// A break target is a jump target that can be used to break out of a
// statement that keeps extra state on the stack (eg, for/in or
// try/finally). They know the expected stack height at the target
// and will drop state from nested statements as part of merging.
//
// Break targets are used for return, break, and continue targets.
class BreakTarget : public JumpTarget {
public:
// Construct a break target.
BreakTarget() {}
virtual ~BreakTarget() {}
// Set the direction of the break target.
virtual void set_direction(Directionality direction);
// Copy the state of this break target to the destination. The
// lists of forward-reaching frames and merge-point labels are
// copied. All virtual frame pointers are copied, not the
// pointed-to frames. The previous state of the destination is
// overwritten, without deallocating pointed-to virtual frames.
void CopyTo(BreakTarget* destination);
// Emit a jump to the target. There must be a current frame at the
// jump and there will be no current frame after the jump.
virtual void Jump();
virtual void Jump(Result* arg);
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
// Bind a break target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward
// jump.
virtual void Bind();
virtual void Bind(Result* arg);
// Setter for expected height.
void set_expected_height(int expected) { expected_height_ = expected; }
private:
// The expected height of the expression stack where the target will
// be bound, statically known at initialization time.
int expected_height_;
DISALLOW_COPY_AND_ASSIGN(BreakTarget);
};
// -------------------------------------------------------------------------
// Shadow break targets
//
@ -280,7 +85,6 @@ class ShadowTarget : public BreakTarget {
DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
};
} } // namespace v8::internal
#endif // V8_JUMP_TARGET_H_

2
deps/v8/src/liveedit.cc

@ -988,7 +988,7 @@ class RelocInfoBuffer {
byte* buffer_;
int buffer_size_;
static const int kBufferGap = 8;
static const int kBufferGap = RelocInfoWriter::kMaxSize;
static const int kMaximalBufferSize = 512*MB;
};

2
deps/v8/src/log.cc

@ -170,7 +170,7 @@ void StackTracer::Trace(TickSample* sample) {
SafeStackTraceFrameIterator it(sample->fp, sample->sp,
sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
sample->stack[i++] = it.frame()->pc();
sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
it.Advance();
}
sample->frames_count = i;

5
deps/v8/src/macro-assembler.h

@ -68,13 +68,8 @@ const int kInvalidProtoDepth = -1;
#elif V8_TARGET_ARCH_ARM
#include "arm/constants-arm.h"
#include "assembler.h"
#ifdef V8_ARM_VARIANT_THUMB
#include "arm/assembler-thumb2.h"
#include "arm/assembler-thumb2-inl.h"
#else
#include "arm/assembler-arm.h"
#include "arm/assembler-arm-inl.h"
#endif
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS

5
deps/v8/src/macros.py

@ -112,6 +112,11 @@ macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
macro FLOOR(arg) = $floor(arg);
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
# This is the same as being either a function or an object in V8 terminology.
macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));

8
deps/v8/src/mark-compact.cc

@ -78,6 +78,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepLargeObjectSpace();
if (IsCompacting()) {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
UpdatePointers();
@ -678,6 +679,7 @@ void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
@ -1163,6 +1165,8 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
Heap::UpdateRSet(target);
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
}
} else {
@ -1177,6 +1181,8 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
if (target_space == Heap::old_pointer_space()) {
Heap::UpdateRSet(target);
}
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
}
}
@ -1735,6 +1741,8 @@ MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
ASSERT(state_ == SWEEP_SPACES);
ASSERT(!IsCompacting());
// Noncompacting collections simply sweep the spaces to clear the mark
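Several hunks above wrap whole collector phases in a GCTracer::Scope so that each phase's wall-clock time is charged to a named bucket (MC_MARK, MC_SWEEP, MC_COMPACT). The sketch below shows the general RAII pattern under stated assumptions; PhaseTimer and phase_ms are invented here and are not V8's GCTracer API.

#include <chrono>
#include <cstdio>

enum Phase { MC_MARK, MC_SWEEP, MC_COMPACT, kNumPhases };
static double phase_ms[kNumPhases] = {0};

class PhaseTimer {
 public:
  // Constructing the timer starts the clock for one phase.
  explicit PhaseTimer(Phase phase)
      : phase_(phase), start_(std::chrono::steady_clock::now()) {}
  // The destructor charges the elapsed time to that phase's bucket.
  ~PhaseTimer() {
    auto end = std::chrono::steady_clock::now();
    phase_ms[phase_] +=
        std::chrono::duration<double, std::milli>(end - start_).count();
  }
 private:
  Phase phase_;
  std::chrono::steady_clock::time_point start_;
};

void SweepSpaces() {
  PhaseTimer timer(MC_SWEEP);  // everything in this function counts as sweep
  // ... sweeping work would happen here ...
}

int main() {
  SweepSpaces();
  std::printf("sweep: %.3f ms\n", phase_ms[MC_SWEEP]);
}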

4
deps/v8/src/mips/assembler-mips.cc

@ -34,6 +34,9 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
@ -1206,3 +1209,4 @@ void Assembler::set_target_address_at(Address pc, Address target) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

3
deps/v8/src/mips/builtins-mips.cc

@ -29,6 +29,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
@ -200,3 +202,4 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

4
deps/v8/src/mips/codegen-mips.cc

@ -28,6 +28,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
@ -1426,3 +1428,5 @@ int CompareStub::MinorKey() {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

5
deps/v8/src/mips/constants-mips.cc

@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "constants-mips.h"
namespace assembler {
@ -321,3 +324,5 @@ Instruction::Type Instruction::InstructionType() const {
}
} } // namespace assembler::mips
#endif // V8_TARGET_ARCH_MIPS

4
deps/v8/src/mips/cpu-mips.cc

@ -35,6 +35,9 @@
#endif // #ifdef __mips
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
#include "cpu.h"
namespace v8 {
@ -67,3 +70,4 @@ void CPU::DebugBreak() {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
