Upgrade V8 to 3.1.5

Ref: v0.7.4-release
Author: Ryan Dahl, 14 years ago
Commit: 550f73ae3e
  1. deps/v8/.gitignore | 2
  2. deps/v8/AUTHORS | 1
  3. deps/v8/ChangeLog | 65
  4. deps/v8/SConstruct | 21
  5. deps/v8/include/v8.h | 1
  6. deps/v8/samples/shell.cc | 3
  7. deps/v8/src/SConscript | 5
  8. deps/v8/src/accessors.cc | 1
  9. deps/v8/src/api.cc | 68
  10. deps/v8/src/arguments.h | 2
  11. deps/v8/src/arm/assembler-arm-inl.h | 4
  12. deps/v8/src/arm/assembler-arm.cc | 58
  13. deps/v8/src/arm/assembler-arm.h | 9
  14. deps/v8/src/arm/builtins-arm.cc | 58
  15. deps/v8/src/arm/code-stubs-arm.cc | 381
  16. deps/v8/src/arm/code-stubs-arm.h | 46
  17. deps/v8/src/arm/codegen-arm.cc | 24
  18. deps/v8/src/arm/constants-arm.h | 2
  19. deps/v8/src/arm/deoptimizer-arm.cc | 204
  20. deps/v8/src/arm/full-codegen-arm.cc | 557
  21. deps/v8/src/arm/ic-arm.cc | 117
  22. deps/v8/src/arm/lithium-arm.cc | 88
  23. deps/v8/src/arm/lithium-arm.h | 106
  24. deps/v8/src/arm/lithium-codegen-arm.cc | 208
  25. deps/v8/src/arm/lithium-codegen-arm.h | 8
  26. deps/v8/src/arm/macro-assembler-arm.cc | 172
  27. deps/v8/src/arm/macro-assembler-arm.h | 27
  28. deps/v8/src/arm/regexp-macro-assembler-arm.cc | 71
  29. deps/v8/src/arm/regexp-macro-assembler-arm.h | 3
  30. deps/v8/src/arm/simulator-arm.h | 15
  31. deps/v8/src/arm/stub-cache-arm.cc | 41
  32. deps/v8/src/arm/virtual-frame-arm.cc | 13
  33. deps/v8/src/arm/virtual-frame-arm.h | 3
  34. deps/v8/src/array.js | 10
  35. deps/v8/src/assembler.cc | 2
  36. deps/v8/src/assembler.h | 8
  37. deps/v8/src/ast.cc | 4
  38. deps/v8/src/bignum.cc | 11
  39. deps/v8/src/bootstrapper.cc | 51
  40. deps/v8/src/builtins.cc | 45
  41. deps/v8/src/builtins.h | 170
  42. deps/v8/src/code-stubs.cc | 4
  43. deps/v8/src/code-stubs.h | 3
  44. deps/v8/src/codegen.cc | 1
  45. deps/v8/src/compiler.cc | 25
  46. deps/v8/src/compiler.h | 4
  47. deps/v8/src/d8.cc | 2
  48. deps/v8/src/date.js | 7
  49. deps/v8/src/debug.cc | 4
  50. deps/v8/src/deoptimizer.cc | 14
  51. deps/v8/src/execution.cc | 1
  52. deps/v8/src/execution.h | 2
  53. deps/v8/src/factory.cc | 9
  54. deps/v8/src/factory.h | 2
  55. deps/v8/src/flag-definitions.h | 10
  56. deps/v8/src/full-codegen.cc | 2
  57. deps/v8/src/full-codegen.h | 6
  58. deps/v8/src/gdb-jit.cc | 121
  59. deps/v8/src/handles.cc | 12
  60. deps/v8/src/handles.h | 7
  61. deps/v8/src/heap.cc | 2
  62. deps/v8/src/heap.h | 1
  63. deps/v8/src/hydrogen-instructions.cc | 186
  64. deps/v8/src/hydrogen-instructions.h | 441
  65. deps/v8/src/hydrogen.cc | 462
  66. deps/v8/src/hydrogen.h | 38
  67. deps/v8/src/ia32/assembler-ia32.cc | 17
  68. deps/v8/src/ia32/assembler-ia32.h | 53
  69. deps/v8/src/ia32/builtins-ia32.cc | 15
  70. deps/v8/src/ia32/code-stubs-ia32.cc | 223
  71. deps/v8/src/ia32/code-stubs-ia32.h | 38
  72. deps/v8/src/ia32/codegen-ia32.cc | 27
  73. deps/v8/src/ia32/deoptimizer-ia32.cc | 58
  74. deps/v8/src/ia32/disasm-ia32.cc | 12
  75. deps/v8/src/ia32/full-codegen-ia32.cc | 133
  76. deps/v8/src/ia32/ic-ia32.cc | 49
  77. deps/v8/src/ia32/lithium-codegen-ia32.cc | 332
  78. deps/v8/src/ia32/lithium-codegen-ia32.h | 26
  79. deps/v8/src/ia32/lithium-ia32.cc | 133
  80. deps/v8/src/ia32/lithium-ia32.h | 234
  81. deps/v8/src/ia32/macro-assembler-ia32.cc | 133
  82. deps/v8/src/ia32/macro-assembler-ia32.h | 9
  83. deps/v8/src/ia32/simulator-ia32.h | 9
  84. deps/v8/src/ia32/stub-cache-ia32.cc | 36
  85. deps/v8/src/ia32/virtual-frame-ia32.cc | 14
  86. deps/v8/src/ia32/virtual-frame-ia32.h | 3
  87. deps/v8/src/ic.cc | 89
  88. deps/v8/src/ic.h | 23
  89. deps/v8/src/liveedit.cc | 1
  90. deps/v8/src/macro-assembler.h | 7
  91. deps/v8/src/math.js | 2
  92. deps/v8/src/messages.cc | 9
  93. deps/v8/src/messages.js | 4
  94. deps/v8/src/objects-debug.cc | 2
  95. deps/v8/src/objects-inl.h | 22
  96. deps/v8/src/objects.cc | 131
  97. deps/v8/src/objects.h | 66
  98. deps/v8/src/oprofile-agent.cc | 108
  99. deps/v8/src/parser.cc | 48
  100. deps/v8/src/parser.h | 5

deps/v8/.gitignore | 2

@@ -20,6 +20,8 @@ d8_g
shell
shell_g
/obj/
/test/es5conform/data/
/test/mozilla/data/
/test/sputnik/sputniktests/
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o

deps/v8/AUTHORS | 1

@@ -9,6 +9,7 @@ ARM Ltd.
Hewlett-Packard Development Company, LP
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Bert Belder <bertbelder@gmail.com>

deps/v8/ChangeLog | 65

@@ -1,3 +1,64 @@
2011-02-16: Version 3.1.5

        Change RegExp parsing to disallow /(*)/.
        Added GDB JIT support for ARM.
        Fixed several crash bugs.
        Performance improvements on the IA32 platform.

2011-02-14: Version 3.1.4

        Fixed incorrect compare of prototypes of the global object (issue
        1082).
        Fixed a bug in optimizing calls to global functions (issue 1106).
        Made optimized Function.prototype.apply safe for non-JSObject first
        arguments (issue 1128).
        Fixed an error related to element accessors on Object.prototype and
        parser errors (issue 1130).
        Fixed a bug in sorting an array with large array indices (issue 1131).
        Properly treat exceptions thrown while compiling (issue 1132).
        Fixed a bug in register requirements for function.apply (issue 1133).
        Fixed a representation change bug in the Hydrogen graph construction
        (issue 1134).
        Fixed the semantics of delete on parameters (issue 1136).
        Fixed an optimizer bug related to moving instructions with side effects
        (issue 1138).
        Added support for the global object in Object.keys (issue 1150).
        Fixed incorrect value for Math.LOG10E
        (issue http://code.google.com/p/chromium/issues/detail?id=72555).
        Performance improvements on the IA32 platform.
        Implemented assignment to undefined reference in ES5 Strict Mode.

2011-02-09: Version 3.1.3

        Fixed a bug triggered by functions with huge numbers of declared
        arguments.
        Fixed zap value aliasing a real object - debug mode only (issue 866).
        Fixed issue where Array.prototype.__proto__ had been set to null
        (issue 1121).
        Fixed stability bugs in Crankshaft for x86.

2011-02-07: Version 3.1.2

        Added better security checks when accessing properties via
@@ -56,8 +117,8 @@
Introduced partial strict mode support.
Changed formatting of recursive error messages to match Firefox and Safari
(issue http://crbug.com/70334).
Changed formatting of recursive error messages to match Firefox and
Safari (issue http://crbug.com/70334).
Fixed incorrect rounding for float-to-integer conversions for external
array types, which implement the Typed Array spec
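As a side note on the Math.LOG10E entry above: the correct constant is the base-10 logarithm of e, i.e. 1/ln(10), which a few lines of standard C++ can confirm (an illustration, not part of this commit):

    #include <cmath>
    #include <cstdio>

    int main() {
      // Math.LOG10E is log base 10 of Euler's number e, i.e. 1 / ln(10).
      double log10e = std::log10(std::exp(1.0));
      std::printf("%.16f\n", log10e);  // 0.4342944819032518
      return 0;
    }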

deps/v8/SConstruct | 21

@@ -136,7 +136,7 @@ LIBRARY_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
@@ -234,9 +234,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
},
'gdbjit:on': {
'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
}
@@ -538,10 +535,6 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['DEBUG']
},
'prof:oprofile': {
'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
'LIBS': ['opagent']
}
},
'msvc': {
'all': {
@@ -669,8 +662,8 @@ def GuessToolchain(os):
def GuessVisibility(os, toolchain):
if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
# MinGW / Cygwin can't do it.
if os == 'win32' and toolchain == 'gcc':
# MinGW can't do it.
return 'default'
elif os == 'solaris':
return 'default'
@@ -691,7 +684,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
'default': OS_GUESS,
'help': 'the os to build for (%s)' % OS_GUESS
},
@@ -711,7 +704,7 @@ SIMPLE_OPTIONS = {
'help': 'build using snapshots for faster start-up'
},
'prof': {
'values': ['on', 'off', 'oprofile'],
'values': ['on', 'off'],
'default': 'off',
'help': 'enable profiling of build target'
},
@@ -896,10 +889,8 @@ def VerifyOptions(env):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')):
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':

deps/v8/include/v8.h | 1

@@ -462,7 +462,6 @@ class V8EXPORT HandleScope {
void Leave();
internal::Object** prev_next_;
internal::Object** prev_limit_;

deps/v8/samples/shell.cc | 3

@@ -27,6 +27,7 @@
#include <v8.h>
#include <v8-testing.h>
#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
@@ -290,11 +291,13 @@ bool ExecuteString(v8::Handle<v8::String> source,
} else {
v8::Handle<v8::Value> result = script->Run();
if (result.IsEmpty()) {
assert(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions)
ReportException(&try_catch);
return false;
} else {
assert(!try_catch.HasCaught());
if (print_result && !result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.

deps/v8/src/SConscript | 5

@@ -97,7 +97,6 @@ SOURCES = {
objects.cc
objects-printer.cc
objects-visiting.cc
oprofile-agent.cc
parser.cc
preparser.cc
preparse-data.cc
@@ -234,7 +233,6 @@ SOURCES = {
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
@@ -266,9 +264,6 @@ D8_FILES = {
'os:solaris': [
'd8-posix.cc'
],
'os:cygwin': [
'd8-posix.cc'
],
'os:win32': [
'd8-windows.cc'
],

deps/v8/src/accessors.cc | 1

@@ -447,6 +447,7 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!function->has_prototype()) {
if (!function->should_have_prototype()) return Heap::undefined_value();
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
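The guard added above makes reading prototype off a function that is not supposed to have one (builtins such as Math.max) yield undefined instead of lazily allocating a prototype object. A minimal standalone sketch of that control flow, with hypothetical stand-in types rather than V8's real classes:

    #include <cassert>

    // Hypothetical stand-ins, only to show the shape of the guard above.
    struct Obj { bool is_undefined; };
    static Obj undefined_value = {true};

    struct Func {
      bool has_prototype;
      bool should_have_prototype;  // false for builtins like Math.max
      Obj prototype;
    };

    Obj* FunctionGetPrototype(Func* f) {
      if (!f->has_prototype) {
        // The new early-out: never allocate a prototype for such functions.
        if (!f->should_have_prototype) return &undefined_value;
        f->prototype = Obj{false};  // lazy allocation in the real code
        f->has_prototype = true;
      }
      return &f->prototype;
    }

    int main() {
      Func builtin = {false, false, {false}};
      assert(FunctionGetPrototype(&builtin)->is_undefined);
      return 0;
    }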

deps/v8/src/api.cc | 68

@@ -115,7 +115,9 @@ static FatalErrorCallback exception_behavior = NULL;
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
ENTER_V8;
#ifdef ENABLE_VMSTATE_TRACKING
i::VMState __state__(i::OTHER);
#endif
API_Fatal(location, message);
}
@@ -668,7 +670,7 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
if (IsDeadCheck("v8::Template::SetProperty()")) return;
if (IsDeadCheck("v8::Template::Set()")) return;
ENTER_V8;
HandleScope scope;
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
@@ -2204,6 +2206,12 @@ bool Value::Equals(Handle<Value> that) const {
ENTER_V8;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// If both obj and other are JSObjects, compare by identity immediately
// instead of going into the JS builtin: Invoke would overwrite the global
// object receiver with the global proxy.
if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other;
}
i::Object** args[1] = { other.location() };
EXCEPTION_PREAMBLE();
i::Handle<i::Object> result =
@@ -2653,26 +2661,38 @@ int v8::Object::GetIdentityHash() {
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
int hash_value;
if (hash->IsSmi()) {
hash_value = i::Smi::cast(*hash)->value();
} else {
int attempts = 0;
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
hash_value = i::V8::Random() & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
i::SetProperty(hidden_props,
hash_symbol,
i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
static_cast<PropertyAttributes>(None));
i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
if (!hidden_props_obj->IsJSObject()) {
// We failed to create hidden properties. That's a detached
// global proxy.
ASSERT(hidden_props_obj->IsUndefined());
return 0;
}
i::Handle<i::JSObject> hidden_props =
i::Handle<i::JSObject>::cast(hidden_props_obj);
i::Handle<i::String> hash_symbol = i::Factory::identity_hash_symbol();
if (hidden_props->HasLocalProperty(*hash_symbol)) {
i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
CHECK(!hash.is_null());
CHECK(hash->IsSmi());
return i::Smi::cast(*hash)->value();
}
int hash_value;
int attempts = 0;
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
hash_value = i::V8::Random() & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
CHECK(!i::SetLocalPropertyIgnoreAttributes(
hidden_props,
hash_symbol,
i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
static_cast<PropertyAttributes>(None)).is_null());
return hash_value;
}
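The retry loop above is self-contained enough to restate in plain C++: draw random values masked into the positive smi range, retry up to 30 times if zero comes out, and force 1 as a last resort so the hash is never 0. A sketch with std::mt19937 standing in for i::V8::Random():

    #include <cstdint>
    #include <random>

    // Positive smi payload on 32-bit V8 is 30 bits, so kMaxValue is
    // 0x3fffffff; the mask mirrors `i::V8::Random() & i::Smi::kMaxValue`.
    const uint32_t kSmiMaxValue = 0x3fffffffu;

    int32_t GenerateIdentityHash(std::mt19937* rng) {
      int32_t hash_value;
      int attempts = 0;
      do {
        hash_value = static_cast<int32_t>((*rng)() & kSmiMaxValue);
        attempts++;
      } while (hash_value == 0 && attempts < 30);
      return hash_value != 0 ? hash_value : 1;  // never return 0
    }

    int main() {
      std::mt19937 rng(0);
      return GenerateIdentityHash(&rng) == 0;  // exits 0: hash is nonzero
    }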
@@ -2749,9 +2769,9 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
return;
}
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
i::Handle<i::Map> slow_map =
i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
self->set_map(*slow_map);
i::Handle<i::Map> pixel_array_map =
i::Factory::GetPixelArrayElementsMap(i::Handle<i::Map>(self->map()));
self->set_map(*pixel_array_map);
self->set_elements(*pixels);
}

deps/v8/src/arguments.h | 2

@@ -78,7 +78,7 @@ class Arguments BASE_EMBEDDED {
class CustomArguments : public Relocatable {
public:
inline CustomArguments(Object* data,
JSObject* self,
Object* self,
JSObject* holder) {
values_[2] = self;
values_[1] = holder;

deps/v8/src/arm/assembler-arm-inl.h | 4

@@ -198,6 +198,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -221,6 +223,8 @@ void RelocInfo::Visit() {
StaticVisitor::VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT

deps/v8/src/arm/assembler-arm.cc | 58

@@ -272,7 +272,6 @@ static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false) {
// BUG(3245989): disable peephole optimization if crankshaft is enabled.
allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
@@ -352,6 +351,11 @@ void Assembler::CodeTargetAlign() {
}
Condition Assembler::GetCondition(Instr instr) {
return Instruction::ConditionField(instr);
}
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
@@ -428,6 +432,20 @@ Register Assembler::GetRd(Instr instr) {
}
Register Assembler::GetRn(Instr instr) {
Register reg;
reg.code_ = Instruction::RnValue(instr);
return reg;
}
Register Assembler::GetRm(Instr instr) {
Register reg;
reg.code_ = Instruction::RmValue(instr);
return reg;
}
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
@@ -465,6 +483,35 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
}
bool Assembler::IsCmpRegister(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
(CMP | S);
}
bool Assembler::IsCmpImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | CMP | S);
}
Register Assembler::GetCmpImmediateRegister(Instr instr) {
ASSERT(IsCmpImmediate(instr));
return GetRn(instr);
}
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
ASSERT(IsCmpImmediate(instr));
return instr & kOff12Mask;
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -1052,6 +1099,13 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
}
void Assembler::cmp_raw_immediate(
Register src, int raw_immediate, Condition cond) {
ASSERT(is_uint12(raw_immediate));
emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMN | S, src1, r0, src2);
}
@@ -2363,7 +2417,7 @@ void Assembler::nop(int type) {
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
// Check for mov rx, rx where x = type.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
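The `al | 13*B21 | type*B12 | type` expression in IsNop is easy to sanity-check by hand: condition al occupies bits 31..28 (0xe), opcode 13 (MOV) sits at bits 24..21, and type lands in both the Rd and Rm fields, giving mov rx, rx. A small worked example in plain C++ (assumed constants, not assembler source):

    #include <cassert>
    #include <cstdint>

    const uint32_t al = 0xe0000000u;              // condition "always"
    const uint32_t B21 = 1u << 21, B12 = 1u << 12;

    // mov rx, rx with x = type: the marking nop recognized by IsNop above.
    uint32_t MarkingNop(uint32_t type) {
      return al | 13 * B21 | type * B12 | type;   // 13 is the MOV opcode
    }

    int main() {
      assert(MarkingNop(0) == 0xe1a00000u);  // canonical ARM nop: mov r0, r0
      assert(MarkingNop(3) == 0xe1a03003u);  // mov r3, r3, a type-3 marker
      return 0;
    }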

deps/v8/src/arm/assembler-arm.h | 9

@@ -729,6 +729,7 @@ class Assembler : public Malloced {
void cmp(Register src1, Register src2, Condition cond = al) {
cmp(src1, Operand(src2), cond);
}
void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
void cmn(Register src1, const Operand& src2, Condition cond = al);
@@ -1099,6 +1100,7 @@ class Assembler : public Malloced {
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
@@ -1109,6 +1111,8 @@ class Assembler : public Malloced {
static bool IsAddRegisterImmediate(Instr instr);
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
static Register GetRn(Instr instr);
static Register GetRm(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
static bool IsStrRegFpOffset(Instr instr);
@@ -1116,6 +1120,11 @@ class Assembler : public Malloced {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries

deps/v8/src/arm/builtins-arm.cc | 58

@@ -1156,12 +1156,48 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
__ stop("builtins-arm.cc: NotifyOSR");
// For now, we are relying on the fact that Runtime::NotifyOSR
// doesn't do any garbage collection which allows us to save/restore
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ EnterInternalFrame();
__ CallRuntime(Runtime::kNotifyOSR, 0);
__ LeaveInternalFrame();
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ stop("builtins-arm.cc: OnStackReplacement");
// Probe the CPU to set the supported features, because this builtin
// may be called before the initialization performs CPU setup.
CpuFeatures::Probe(false);
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
Label skip;
__ cmp(r0, Operand(Smi::FromInt(-1)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiUntag(r0);
__ push(r0);
// Generate the code for doing the frame-to-frame translation using
// the deoptimizer infrastructure.
Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
generator.Generate();
}
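Restated at a high level, the builtin above implements a small protocol: ask the runtime to compile an optimized version, bail out on the -1 sentinel, otherwise untag the returned AST id and hand it to the deoptimizer's OSR entry generator. A toy model in plain C++ (names and smi tagging here are illustrative only):

    #include <cstdio>

    // Stand-in for Runtime::kCompileForOnStackReplacement: returns -1 when
    // the function could not be optimized, otherwise the smi-tagged AST id
    // of the loop at which to enter the optimized code.
    int CompileForOnStackReplacement(bool can_optimize) {
      const int loop_ast_id = 42;             // hypothetical
      return can_optimize ? (loop_ast_id << 1) : -1;
    }

    int main() {
      int result = CompileForOnStackReplacement(true);
      if (result == -1) {
        std::puts("resume unoptimized code");  // the early Ret() above
        return 0;
      }
      int ast_id = result >> 1;  // SmiUntag before the frame translation
      std::printf("OSR into optimized frame at ast id %d\n", ast_id);
      return 0;
    }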
@@ -1195,6 +1231,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &shift_arguments);
// Compute the receiver in non-strict mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
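The bit test above has one subtlety worth spelling out: the compiler hints word is read straight out of the SharedFunctionInfo as a smi, so the strict-mode flag sits one position higher than its logical bit index, hence the + kSmiTagSize in the shift. A worked sketch (the bit positions here are assumptions for illustration):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;           // one tag bit on 32-bit smis
    const int kStrictModeFunction = 2;   // hypothetical logical bit index

    // Testing the raw (still smi-tagged) hints word, as the tst above does.
    bool IsStrictMode(uint32_t raw_hints_smi) {
      return (raw_hints_smi &
              (1u << (kStrictModeFunction + kSmiTagSize))) != 0;
    }

    int main() {
      uint32_t logical_hints = 1u << kStrictModeFunction;  // untagged view
      uint32_t tagged_hints = logical_hints << kSmiTagSize;
      assert(IsStrictMode(tagged_hints));
      return 0;
    }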
@@ -1358,10 +1402,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Change context eagerly to get the right global object if necessary.
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
// Load the shared function info while the function is still in r0.
__ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ ldr(r0, MemOperand(fp, kRecvOffset));
// Do not transform the receiver for strict mode functions.
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &push_receiver);
// Compute the receiver in non-strict mode.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);

deps/v8/src/arm/code-stubs-arm.cc | 381

@@ -1298,7 +1298,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result;
Label not_heap_number;
Register scratch = r7;
Register scratch = r9.is(tos_) ? r7 : r9;
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
@@ -2588,6 +2588,39 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
__ eor(right, left, Operand(right));
__ Ret();
break;
case Token::SAR:
// Remove tags from right operand.
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
// Smi tag result.
__ bic(right, right, Operand(kSmiTagMask));
__ Ret();
break;
case Token::SHR:
// Remove tags from operands. We can't do this on a 31 bit number
// because then the 0s get shifted into bit 30 instead of bit 31.
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
// Unsigned shift is not allowed to produce a negative number, so
// check the sign bit and the sign bit after Smi tagging.
__ tst(scratch1, Operand(0xc0000000));
__ b(ne, &not_smi_result);
// Smi tag result.
__ SmiTag(right, scratch1);
__ Ret();
break;
case Token::SHL:
// Remove tags from operands.
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
// Check that the signed result fits in a Smi.
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
__ b(mi, &not_smi_result);
__ SmiTag(right, scratch1);
__ Ret();
break;
default:
UNREACHABLE();
}
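Two tricks in these shift cases deserve a scalar restatement: the SHR path masks the result with 0xc0000000 because an unsigned shift may produce values too large for a signed 31-bit smi, and the SHL path checks smi range by adding 0x40000000 and branching on minus. A plain C++ sketch of both checks (assuming 32-bit smis with a single 0 tag bit):

    #include <cassert>
    #include <cstdint>

    // A 32-bit value fits in a (31-bit signed) smi iff adding 0x40000000
    // leaves the sign bit clear: exactly the `add ..., SetCC; b(mi, ...)`
    // sequence emitted for SHL above.
    bool FitsInSmi(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }

    int main() {
      assert(FitsInSmi(0x3fffffff) && !FitsInSmi(0x40000000));
      assert(FitsInSmi(-0x40000000) && !FitsInSmi(-0x40000001));

      // SHR example: in JS, (-1 >>> 0) is 4294967295, far outside smi range.
      // The 0xc0000000 test above catches any such result and falls through
      // to the non-smi path.
      uint32_t shifted = static_cast<uint32_t>(-1) >> 0;
      assert((shifted & 0xc0000000u) != 0);
      return 0;
    }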
@@ -2703,7 +2736,10 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
}
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
case Token::BIT_AND:
case Token::SAR:
case Token::SHR:
case Token::SHL: {
if (smi_operands) {
__ SmiUntag(r3, left);
__ SmiUntag(r2, right);
@@ -2726,6 +2762,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
d0,
not_numbers);
}
Label result_not_a_smi;
switch (op_) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
@@ -2736,11 +2774,35 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
case Token::BIT_AND:
__ and_(r2, r3, Operand(r2));
break;
case Token::SAR:
// Use only the 5 least significant bits of the shift count.
__ and_(r2, r2, Operand(0x1f));
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, ASR, r2));
break;
case Token::SHR:
// Use only the 5 least significant bits of the shift count.
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, LSR, r2), SetCC);
// SHR is special because it is required to produce a positive answer.
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
}
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, LSL, r2));
break;
default:
UNREACHABLE();
}
Label result_not_a_smi;
// Check that the *signed* result fits in a smi.
__ add(r3, r2, Operand(0x40000000), SetCC);
__ b(mi, &result_not_a_smi);
@@ -2760,10 +2822,15 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ mov(r0, Operand(r5));
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r2);
__ vcvt_f64_s32(d0, s0);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
}
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
@@ -2790,15 +2857,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2825,15 +2883,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label not_smis, call_runtime;
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
// Only allow smi results.
@@ -2864,15 +2913,6 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
GenerateTypeTransition(masm);
@@ -2880,15 +2920,6 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Label not_numbers, call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@@ -2903,15 +2934,6 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Label call_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@@ -2984,6 +3006,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_JS);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_JS);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_JS);
break;
default:
UNREACHABLE();
}
@@ -3268,105 +3299,13 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// r0 holds the exception.
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2);
__ str(r2, MemOperand(r3));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
__ cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
__ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
__ Throw(r0);
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
__ ldr(r2, MemOperand(sp, kStateOffset));
__ cmp(r2, Operand(StackHandler::ENTRY));
__ b(eq, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
__ ldr(sp, MemOperand(sp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2);
__ str(r2, MemOperand(r3));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
__ mov(r0, Operand(false, RelocInfo::NONE));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
__ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
__ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
__ str(r0, MemOperand(r2));
}
// Stack layout at this point. See also StackHandlerConstants.
// sp -> state (ENTRY)
// fp
// lr
// Discard handler state (r2 is not used) and restore frame pointer.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
__ cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
__ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
__ ThrowUncatchable(type, r0);
}
@@ -3453,7 +3392,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
__ LeaveExitFrame(save_doubles_);
// Callee-saved register r4 still holds argc.
__ LeaveExitFrame(save_doubles_, r4);
__ mov(pc, lr);
// check if we should retry or throw exception
Label retry;
@@ -4232,24 +4173,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
static const int kRegExpExecuteArguments = 7;
__ push(lr);
__ PrepareCallCFunction(kRegExpExecuteArguments, r0);
static const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
// Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[0]): static offsets vector buffer.
// Argument 5 (sp[4]): static offsets vector buffer.
__ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
__ str(r0, MemOperand(sp, 0 * kPointerSize));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
@@ -4271,8 +4215,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(r7, kRegExpExecuteArguments);
__ pop(lr);
DirectCEntryStub stub;
stub.GenerateCall(masm, r7);
__ LeaveExitFrame(false, no_reg);
// r0: result
// subject: subject string (callee saved)
@@ -4281,6 +4227,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
@@ -4293,12 +4240,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r0, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
__ mov(r1, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r1, MemOperand(r1, 0));
__ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
__ str(r1, MemOperand(r2, 0)); // Clear pending exception.
// Check if the exception is a termination. If so, throw as uncatchable.
__ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
__ cmp(r0, ip);
Label termination_exception;
__ b(eq, &termination_exception);
__ Throw(r0); // Expects thrown value in r0.
__ bind(&termination_exception);
__ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
__ bind(&failure);
// For failure and exception return null.
__ mov(r0, Operand(Factory::null_value()));
@@ -5809,10 +5770,9 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
// For equality we do not care about the sign of the result.
__ sub(r0, r0, r1, SetCC);
} else {
__ sub(r1, r1, r0, SetCC);
// Correct sign of result in case of overflow.
__ rsb(r1, r1, Operand(0), SetCC, vs);
__ mov(r0, r1);
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(r1);
__ sub(r0, r1, SmiUntagOperand(r0));
}
__ Ret();
@@ -5923,14 +5883,24 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
ApiFunction *function) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
// Push return address (accessible to GC through exit frame pc).
__ mov(r2,
Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
// Push return address (accessible to GC through exit frame pc).
__ str(pc, MemOperand(sp, 0));
__ Jump(r2); // Call the api function.
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
// Push return address (accessible to GC through exit frame pc).
__ str(pc, MemOperand(sp, 0));
__ Jump(target); // Call the C++ function.
}
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -5998,6 +5968,91 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
}
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register elements_map,
Register scratch1,
Register scratch2,
bool load_elements_from_receiver,
bool load_elements_map_from_elements,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged unless the
// store succeeds.
// key - holds the key (must be a smi) and is unchanged.
// value - holds the value (must be a smi) and is unchanged.
// elements - holds the element object of the receiver on entry if
// load_elements_from_receiver is false, otherwise used
// internally to store the pixel arrays elements and
// external array pointer.
// elements_map - holds the map of the element object if
// load_elements_map_from_elements is false, otherwise
// loaded with the element map.
//
Register external_pointer = elements;
Register untagged_key = scratch1;
Register untagged_value = scratch2;
if (load_elements_from_receiver) {
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
}
// By passing NULL as not_pixel_array, callers signal that they have already
// verified that the receiver has pixel array elements.
if (not_pixel_array != NULL) {
if (load_elements_map_from_elements) {
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
}
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(elements_map, ip);
__ b(ne, not_pixel_array);
} else {
if (FLAG_debug_code) {
// Map check should have already made sure that elements is a pixel array.
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(elements_map, ip);
__ Assert(eq, "Elements isn't a pixel array");
}
}
// Some callers already have verified that the key is a smi. key_not_smi is
// set to NULL as a sentinel for that case. Otherwise, an explicit check
// that the key is a smi is generated.
if (key_not_smi != NULL) {
__ JumpIfNotSmi(key, key_not_smi);
} else {
if (FLAG_debug_code) {
__ AbortIfNotSmi(key);
}
}
__ SmiUntag(untagged_key, key);
// Perform bounds check.
__ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(untagged_key, scratch2);
__ b(hs, out_of_range); // unsigned check handles negative keys.
__ JumpIfNotSmi(value, value_not_smi);
__ SmiUntag(untagged_value, value);
// Clamp the value to [0..255].
__ Usat(untagged_value, 8, Operand(untagged_value));
// Get the pointer to the external array. This clobbers elements.
__ ldr(external_pointer,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
__ strb(untagged_value, MemOperand(external_pointer, untagged_key));
__ Ret();
}
#undef __
} } // namespace v8::internal
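The store helper's value clamping is done with the ARM usat instruction (`__ Usat(untagged_value, 8, ...)` above), which saturates a signed value into an unsigned 8-bit range. Its scalar equivalent, for reference:

    #include <cstdint>

    // Scalar equivalent of `usat rd, #8, rm`: clamp into [0..255], the
    // valid range for a pixel array element.
    uint8_t ClampToPixel(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }

    int main() {
      return !(ClampToPixel(-5) == 0 &&
               ClampToPixel(300) == 255 &&
               ClampToPixel(128) == 128);
    }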

deps/v8/src/arm/code-stubs-arm.h | 46

@@ -581,6 +581,7 @@ class DirectCEntryStub: public CodeStub {
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, ApiFunction *function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
@@ -589,14 +590,14 @@ class DirectCEntryStub: public CodeStub {
};
// Generate code the to load an element from a pixel array. The receiver is
// assumed to not be a smi and to have elements, the caller must guarantee this
// precondition. If the receiver does not have elements that are pixel arrays,
// the generated code jumps to not_pixel_array. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated . If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range.
// Generate code to load an element from a pixel array. The receiver is assumed
// to not be a smi and to have elements, the caller must guarantee this
// precondition. If key is not a smi, then the generated code branches to
// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
// check has already been performed on key so that the smi check is not
// generated. If key is not a valid index within the bounds of the pixel array,
// the generated code jumps to out_of_range. receiver, key and elements are
// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -609,6 +610,35 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Label* key_not_smi,
Label* out_of_range);
// Generate code to store an element into a pixel array, clamping values between
// [0..255]. The receiver is assumed to not be a smi and to have elements, the
// caller must guarantee this precondition. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated. If value is not a smi, the generated
// code will branch to value_not_smi. If the receiver doesn't have pixel array
// elements, the generated code will branch to not_pixel_array, unless
// not_pixel_array is NULL, in which case the caller must ensure that the
// receiver has pixel array elements. If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range. If
// load_elements_from_receiver is true, then the receiver's elements are loaded
// into elements, otherwise elements is assumed to already be the receiver's
// elements. If load_elements_map_from_elements is true, elements_map is loaded
// from elements, otherwise it is assumed to already contain the element map.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register elements_map,
Register scratch1,
Register scratch2,
bool load_elements_from_receiver,
bool load_elements_map_from_elements,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range);
} } // namespace v8::internal
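Both helper contracts above lean on the same convention: a NULL label pointer means "the caller has already performed this check, do not emit it". The pattern in miniature, detached from the macro assembler (hypothetical code, for illustration only):

    #include <cstdio>

    struct Label { const char* name; };

    // Emit a key smi check only when the caller provides a bailout target,
    // mirroring the key_not_smi parameter documented above.
    void EmitKeySmiCheck(Label* key_not_smi) {
      if (key_not_smi != nullptr) {
        std::printf("emit: JumpIfNotSmi(key, %s)\n", key_not_smi->name);
      } else {
        // Caller guaranteed key is a smi; only a debug-mode assert remains.
        std::printf("emit: AbortIfNotSmi(key)  // debug only\n");
      }
    }

    int main() {
      Label slow = {"slow_case"};
      EmitKeySmiCheck(&slow);     // check generated
      EmitKeySmiCheck(nullptr);   // check elided
      return 0;
    }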

deps/v8/src/arm/codegen-arm.cc | 24

@@ -2192,15 +2192,10 @@ void CodeGenerator::GenerateReturnSequence() {
DeleteFrame();
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delts above cannot be encoded in
// the add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length ==
Assembler::kJSReturnSequenceInstructions ||
return_sequence_length ==
Assembler::kJSReturnSequenceInstructions + 1);
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceInstructions <=
masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -5849,15 +5844,20 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
Load(property->obj());
Load(property->key());
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
frame_->EmitPush(r0);
} else if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@@ -6931,7 +6931,7 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
frame()->CallStoreIC(name, is_contextual);
frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
} else {
// Inline the in-object property case.
JumpTarget slow, done;

deps/v8/src/arm/constants-arm.h | 2

@@ -582,6 +582,7 @@ class Instruction {
inline int TypeValue() const { return Bits(27, 25); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);
inline int RdValue() const { return Bits(15, 12); }
DECLARE_STATIC_ACCESSOR(RdValue);
@@ -625,6 +626,7 @@ class Instruction {
inline int SValue() const { return Bit(20); }
// with register
inline int RmValue() const { return Bits(3, 0); }
DECLARE_STATIC_ACCESSOR(RmValue);
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline ShiftOp ShiftField() const {
return static_cast<ShiftOp>(BitField(6, 5));

deps/v8/src/arm/deoptimizer-arm.cc | 204

@@ -124,19 +124,204 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
const int kInstrSize = Assembler::kInstrSize;
// The call of the stack guard check has the following form:
// e1 5d 00 0c cmp sp, <limit>
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
(al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// and overwrite the constant containing the
// address of the stack check stub.
// Replace conditional jump with NOP.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address stack_check_address_pointer = pc_after + stack_check_address_offset;
ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(+4, cs);
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address stack_check_address_pointer = pc_after + stack_check_address_offset;
ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
}
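Both patch routines above locate the stub address by decoding the `ldr ip, [pc, #offset]` that loads it: the low 12 bits of the instruction are the byte offset from the ARM pc, and since the pc reads 8 bytes ahead of the ldr (which itself sits 8 bytes before pc_after), the literal lives at exactly pc_after + offset. A sketch of that arithmetic over a flat buffer (assumed accessors, not V8's Memory class):

    #include <cstdint>

    // Decode the pc-relative ldr two instructions before pc_after and
    // return the address of its constant-pool slot.
    uint8_t* StackCheckLiteralAddress(uint8_t* pc_after, uint32_t ldr_instr) {
      uint32_t offset = ldr_instr & 0xfff;  // 12-bit immediate of the ldr
      // ldr address = pc_after - 8; ARM pc reads as ldr address + 8,
      // so literal = (pc_after - 8) + 8 + offset = pc_after + offset.
      return pc_after + offset;
    }

    int main() {
      uint8_t buffer[64] = {0};
      // A fabricated ldr with offset 16: literal is 16 bytes past pc_after.
      uint32_t ldr = 0xe59fc000u | 16;
      return StackCheckLiteralAddress(buffer + 8, ldr) == buffer + 24 ? 0 : 1;
    }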
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
// Read the number of frames.
value = it.Next();
if (value == 1) return i;
}
}
UNREACHABLE();
return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
UNIMPLEMENTED();
DeoptimizationInputData* data = DeoptimizationInputData::cast(
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, ast_id);
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
USE(function);
ASSERT(function == function_);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
output_frame_size);
}
// There's only one output frame in the OSR case.
output_count_ = 1;
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
unsigned output_offset = output_frame_size - kPointerSize;
int parameter_count = function_->shared()->formal_parameter_count() + 1;
for (int i = 0; i < parameter_count; ++i) {
output_[0]->SetFrameSlot(output_offset, 0);
output_offset -= kPointerSize;
}
// Translate the incoming parameters. This may overwrite some of the
// incoming argument slots we've just cleared.
int input_offset = input_frame_size - kPointerSize;
bool ok = true;
int limit = input_offset - (parameter_count * kPointerSize);
while (ok && input_offset > limit) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = 0; ok && i < 4; i++) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
output_offset,
input_value,
input_offset);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
}
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// If translation of any command failed, continue using the input frame.
if (!ok) {
delete output_[0];
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@@ -318,7 +503,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// TOS: bailout-id; TOS+1: return address if not EAGER.
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -353,6 +537,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r3, Operand(0));
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else if (type() == OSR) {
__ mov(r3, lr);
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ mov(r3, lr);
// Correct two words for bailout id and return address.
@@ -375,7 +563,6 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame descriptor pointer to r1 (deoptimizer->input_);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
@@ -396,7 +583,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id, eventually return address, and the saved registers
// from the stack.
if (type() == EAGER) {
if (type() == EAGER || type() == OSR) {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
@@ -450,11 +637,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
UNIMPLEMENTED();
}
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));

557
deps/v8/src/arm/full-codegen-arm.cc

@ -45,6 +45,67 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
// A patch site is a location in the code that can be patched. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
// marker is a cmp rx, #yyy instruction, and x * 0x00000fff + yyy (the raw
// 12-bit immediate value is used) is the delta from the pc to the first
// instruction of the patchable code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
}
~JumpPatchSite() {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
// When initially emitting this, ensure that a jump is always generated to
// skip the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
// Don't use b(al, ...) as that might emit the constant pool right after the
// branch. After patching, when the branch is no longer unconditional,
// execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
// When initially emitting this, ensure that a jump is never generated to
// skip the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
__ b(ne, target); // Never taken before patched.
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
reg.set_code(delta_to_patch_site / kOff12Mask);
__ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
bool info_emitted_;
#endif
};
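The marker arithmetic EmitPatchInfo relies on is easy to check in isolation. A sketch of the encode/decode pair, assuming kOff12Mask == 0xfff as in the ARM assembler (the struct and function names are invented):

const int kOff12Mask = 0xfff;  // Raw 12-bit immediate mask.

struct CmpMarker { int reg_code; int raw_imm12; };  // cmp r<reg_code>, #raw_imm12

CmpMarker EncodeDelta(int delta) {
  return CmpMarker{delta / kOff12Mask, delta % kOff12Mask};
}

int DecodeDelta(const CmpMarker& m) {  // Inverse; see PatchInlinedSmiCode.
  return m.raw_imm12 + m.reg_code * kOff12Mask;
}

For any non-negative delta, DecodeDelta(EncodeDelta(d)) == d, which is what lets the IC patcher recover the patch address later.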
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@ -268,15 +329,10 @@ void FullCodeGenerator::EmitReturnSequence() {
}
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delta above cannot be encoded in the
// add instruction, the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length ==
Assembler::kJSReturnSequenceInstructions ||
return_sequence_length ==
Assembler::kJSReturnSequenceInstructions + 1);
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceInstructions <=
masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
}
@ -285,7 +341,17 @@ void FullCodeGenerator::EmitReturnSequence() {
FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
Token::Value op, Expression* left, Expression* right) {
ASSERT(ShouldInlineSmiCase(op));
return kNoConstants;
if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
// We never generate inlined constant smi operations for these.
return kNoConstants;
} else if (right->IsSmiLiteral()) {
return kRightConstant;
} else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
// Don't inline shifts with constant left hand side.
return kLeftConstant;
} else {
return kNoConstants;
}
}
@ -681,18 +747,24 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForStackValue(prop->obj());
// property. Use (keyed) IC to set the initial value. We
// cannot visit the rewrite because it's shared and we risk
// recording duplicate AST IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
if (function != NULL) {
VisitForStackValue(prop->key());
__ push(r0);
VisitForAccumulatorValue(function);
__ pop(r1); // Key.
__ pop(r2);
} else {
VisitForAccumulatorValue(prop->key());
__ mov(r1, result_register()); // Key.
__ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ mov(r2, r0);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
}
__ pop(r2); // Receiver.
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@ -752,24 +824,24 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
__ bind(&slow_case);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(eq, true, flags, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site);
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
@ -1536,34 +1608,316 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
Label call_stub, done;
// Optimistically add the smi value to the unknown object. If the result
// overflows or is not a smi, we either had a smi overflow or added a smi to
// a tagged pointer.
__ mov(r1, Operand(value));
__ add(r2, r0, r1, SetCC);
__ b(vs, &call_stub);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfNotSmi(r2, &call_stub);
__ mov(r0, r2);
__ b(&done);
// Call the shared stub.
__ bind(&call_stub);
if (!left_is_constant_smi) {
__ Swap(r0, r1, r2);
}
TypeRecordingBinaryOpStub stub(Token::ADD, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
context()->Plug(r0);
}
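What the optimistic add exploits: on 32-bit ARM a smi is the integer shifted left by one with tag bit 0, so adding two tagged smis is a single add. A sketch under that assumption, using the GCC/Clang __builtin_add_overflow builtin in place of the V flag (the function name is invented):

#include <cstdint>

bool TryInlineSmiAdd(int32_t tagged_lhs, int32_t tagged_smi_rhs,
                     int32_t* tagged_result) {
  int32_t sum;
  if (__builtin_add_overflow(tagged_lhs, tagged_smi_rhs, &sum)) {
    return false;  // Overflow: the b(vs, &call_stub) path.
  }
  if ((sum & 1) != 0) {
    return false;  // Tag bit set: lhs was a heap pointer, not a smi.
  }
  *tagged_result = sum;  // Correctly tagged smi result.
  return true;
}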
void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
Label call_stub, done;
// Optimistically subtract using the smi value and the unknown object. If the
// result overflows or is not a smi, we either had a smi overflow or a
// subtraction involving a smi and a tagged pointer.
__ mov(r1, Operand(value));
if (left_is_constant_smi) {
__ sub(r2, r1, r0, SetCC);
} else {
__ sub(r2, r0, r1, SetCC);
}
__ b(vs, &call_stub);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfNotSmi(r2, &call_stub);
__ mov(r0, r2);
__ b(&done);
// Call the shared stub.
__ bind(&call_stub);
if (!left_is_constant_smi) {
__ Swap(r0, r1, r2);
}
TypeRecordingBinaryOpStub stub(Token::SUB, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
context()->Plug(r0);
}
void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
Label call_stub, smi_case, done;
int shift_value = value->value() & 0x1f;
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(r0, &smi_case);
// Call stub.
__ bind(&call_stub);
__ mov(r1, r0);
__ mov(r0, Operand(value));
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ b(&done);
// Smi case.
__ bind(&smi_case);
switch (op) {
case Token::SHL:
if (shift_value != 0) {
__ mov(r1, r0);
if (shift_value > 1) {
__ mov(r1, Operand(r1, LSL, shift_value - 1));
}
// Convert the int result to a smi, checking that it is in smi range.
__ SmiTag(r1, SetCC);
__ b(vs, &call_stub);
__ mov(r0, r1); // Put result back into r0.
}
break;
case Token::SAR:
if (shift_value != 0) {
__ mov(r0, Operand(r0, ASR, shift_value));
__ bic(r0, r0, Operand(kSmiTagMask));
}
break;
case Token::SHR:
// SHR must return a positive value. When shifting by 0 or 1, we need to
// check that smi tagging the result will not create a negative value.
if (shift_value < 2) {
__ mov(r2, Operand(shift_value));
__ SmiUntag(r1, r0);
if (shift_value != 0) {
__ mov(r1, Operand(r1, LSR, shift_value));
}
__ tst(r1, Operand(0xc0000000));
__ b(ne, &call_stub);
__ SmiTag(r0, r1); // result in r0.
} else {
__ SmiUntag(r0);
__ mov(r0, Operand(r0, LSR, shift_value));
__ SmiTag(r0);
}
break;
default:
UNREACHABLE();
}
__ bind(&done);
context()->Plug(r0);
}
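The 0xc0000000 test in the SHR case has a simple reading: the untagged result must leave the top two bits clear, or tagging it (one left shift) would spill into the sign bit. As a one-line sketch:

#include <cstdint>

// True iff smi-tagging the untagged result stays a non-negative smi.
bool ShrResultFitsSmi(uint32_t untagged) {
  return (untagged & 0xc0000000u) == 0;  // Mirrors tst r1, #0xc0000000.
}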
void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
Label smi_case, done;
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(r0, &smi_case);
// The order of the arguments does not matter for bit-ops with a
// constant operand.
__ mov(r1, Operand(value));
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
// Smi case.
__ bind(&smi_case);
__ mov(r1, Operand(value));
switch (op) {
case Token::BIT_OR:
__ orr(r0, r0, Operand(r1));
break;
case Token::BIT_XOR:
__ eor(r0, r0, Operand(r1));
break;
case Token::BIT_AND:
__ and_(r0, r0, Operand(r1));
break;
default:
UNREACHABLE();
}
__ bind(&done);
context()->Plug(r0);
}
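The smi case above needs no untagging at all: with a zero tag bit in both operands, bitwise AND, OR, and XOR of two tagged smis produce a value whose tag bit is again zero, i.e. a valid tagged smi. A sketch of that invariant:

#include <cstdint>

// For tagged smis a and b (a & 1 == 0, b & 1 == 0), the results below
// also have bit 0 clear, so they are already correctly tagged smis.
int32_t TaggedBitOr(int32_t a, int32_t b)  { return a | b; }  // orr
int32_t TaggedBitXor(int32_t a, int32_t b) { return a ^ b; }  // eor
int32_t TaggedBitAnd(int32_t a, int32_t b) { return a & b; }  // and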
void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
switch (op) {
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
EmitConstantSmiBitOp(expr, op, mode, value);
break;
case Token::SHL:
case Token::SAR:
case Token::SHR:
ASSERT(!left_is_constant_smi);
EmitConstantSmiShiftOp(expr, op, mode, value);
break;
case Token::ADD:
EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
break;
case Token::SUB:
EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
break;
default:
UNREACHABLE();
}
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
Expression* right,
Expression* left_expr,
Expression* right_expr,
ConstantOperand constant) {
ASSERT(constant == kNoConstants); // Only handled case.
EmitBinaryOp(op, mode);
if (constant == kRightConstant) {
Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
EmitConstantSmiBinaryOp(expr, op, mode, false, value);
return;
} else if (constant == kLeftConstant) {
Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
EmitConstantSmiBinaryOp(expr, op, mode, true, value);
return;
}
Label done, smi_case, stub_call;
Register scratch1 = r2;
Register scratch2 = r3;
// Get the arguments.
Register left = r1;
Register right = r0;
__ pop(left);
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
STATIC_ASSERT(kSmiTag == 0);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
// recording binary operation stub, see
// TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
__ b(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
__ bic(right, right, Operand(kSmiTagMask));
break;
case Token::SHL: {
__ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
__ b(mi, &stub_call);
__ SmiTag(right, scratch1);
break;
}
case Token::SHR: {
__ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
__ tst(scratch1, Operand(0xc0000000));
__ b(ne, &stub_call);
__ SmiTag(right, scratch1);
break;
}
case Token::ADD:
__ add(scratch1, left, Operand(right), SetCC);
__ b(vs, &stub_call);
__ mov(right, scratch1);
break;
case Token::SUB:
__ sub(scratch1, left, Operand(right), SetCC);
__ b(vs, &stub_call);
__ mov(right, scratch1);
break;
case Token::MUL: {
__ SmiUntag(ip, right);
__ smull(scratch1, scratch2, left, ip);
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
__ tst(scratch1, Operand(scratch1));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
__ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
__ b(mi, &stub_call);
break;
}
case Token::BIT_OR:
__ orr(right, left, Operand(right));
break;
case Token::BIT_AND:
__ and_(right, left, Operand(right));
break;
case Token::BIT_XOR:
__ eor(right, left, Operand(right));
break;
default:
UNREACHABLE();
}
__ bind(&done);
context()->Plug(r0);
}
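The MUL case deserves a gloss: one operand is untagged before smull, so the 64-bit product of a tagged and an untagged smi is itself tagged. It fits a smi iff the high word equals the sign extension of the low word, and a zero product is only +0 when the sign test on left + right passes. A sketch of the same checks (the function name is invented; >> on negative values is assumed arithmetic, as on ARM):

#include <cstdint>

bool TryInlineSmiMul(int32_t tagged_left, int32_t tagged_right,
                     int32_t* tagged_result) {
  int64_t product =
      static_cast<int64_t>(tagged_left) * (tagged_right >> 1);  // SmiUntag.
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  if (hi != (lo >> 31)) return false;  // Does not fit in 32 bits.
  if (lo != 0) {
    *tagged_result = lo;  // Non-zero product, already tagged.
    return true;
  }
  // Zero product: JavaScript distinguishes -0, which is not a smi.
  if (tagged_left + tagged_right < 0) return false;
  *tagged_result = 0;  // Smi::FromInt(0).
  return true;
}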
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
if (op == Token::ADD ||
op == Token::SUB ||
op == Token::MUL ||
op == Token::DIV ||
op == Token::MOD ||
op == Token::BIT_OR ||
op == Token::BIT_AND ||
op == Token::BIT_XOR) {
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {
GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
}
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), NULL);
context()->Plug(r0);
}
@ -1606,10 +1960,20 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ pop(r2);
if (prop->is_synthetic()) {
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
__ mov(r2, r0);
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ pop(r2);
}
__ pop(r0); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@ -1635,8 +1999,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Handle<Code> ic(Builtins::builtin(is_strict()
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@ -2991,39 +3357,50 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (prop == NULL && var == NULL) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
context()->Plug(false);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else if (var->is_global()) {
__ ldr(r1, GlobalObjectOperand());
__ mov(r0, Operand(var->name()));
__ Push(r1, r0);
context()->Plug(r0);
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
if (var->is_global()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
} else if (var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Non-global variable. Call the runtime to delete from the
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
__ push(context_register());
__ mov(r2, Operand(var->name()));
__ push(r2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(r0);
}
context()->Plug(r0);
} else {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
}
break;
}
@ -3214,13 +3591,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
__ JumpIfSmi(r0, &done);
patch_site.EmitJumpIfSmi(r0, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@ -3230,8 +3610,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
__ CallStub(&stub);
TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in r0.
@ -3510,21 +3890,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
__ JumpIfNotSmi(r2, &slow_case);
patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(cond, strict, flags, r1, r0);
__ CallStub(&stub);
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
}
@ -3591,6 +3972,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));

117
deps/v8/src/arm/ic-arm.cc

@ -115,6 +115,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
@ -843,7 +846,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- lr : return address
// -----------------------------------
// Check if the name is a string.
Label miss;
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
GenerateCallNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@ -1465,24 +1475,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// r4: elements map.
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ JumpIfNotSmi(value, &slow);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255].
// Get the pointer to the external array. This clobbers elements.
__ ldr(elements,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
__ strb(r5, MemOperand(elements, r4)); // Elements is now external array.
__ Ret();
GenerateFastPixelArrayStore(masm,
r2,
r1,
r0,
elements,
r4,
r5,
r6,
false,
false,
NULL,
&slow,
&slow,
&slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@ -1533,7 +1539,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@ -1544,7 +1551,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
MONOMORPHIC,
extra_ic_state);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
@ -1700,11 +1708,78 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_));
}
#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address) {
// Currently there is no smi inlining in the ARM full code generator.
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(cmp_instruction_address);
if (!Assembler::IsCmpImmediate(instr)) {
return;
}
// The delta to the start of the map check instruction, and the
// condition code used at the patched jump.
int delta = Assembler::GetCmpImmediateRawImmediate(instr);
delta +=
Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
// If the delta is 0 the instruction is cmp r0, #0 which also signals that
// nothing was inlined.
if (delta == 0) {
return;
}
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
// This is patching a "jump if not smi" site to be active.
// Changing
// cmp rx, rx
// b eq, <target>
// to
// tst rx, #kSmiTagMask
// b ne, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
// This is patching a "jump if smi" site to be active.
// Changing
// cmp rx, rx
// b ne, <target>
// to
// tst rx, #kSmiTagMask
// b eq, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
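The two rewrites above are mirror images; only the branch condition flips. A sketch of the condition swap (the enum and function are invented for illustration; kSmiTagMask is assumed to be 1):

enum Condition { eq, ne };

// cmp rx, rx / b eq  ->  tst rx, #kSmiTagMask / b ne   (jump if not smi)
// cmp rx, rx / b ne  ->  tst rx, #kSmiTagMask / b eq   (jump if smi)
Condition PatchedCondition(Condition unpatched_branch_condition) {
  return unpatched_branch_condition == eq ? ne : eq;
}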

88
deps/v8/src/arm/lithium-arm.cc

@ -62,15 +62,13 @@ void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as
// temporaries and outputs because all registers
// are blocked by the calling convention.
// Inputs can use either fixed register or have a short lifetime (be
// used at start of the instruction).
// Inputs must use a fixed register.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); it.HasNext(); it.Advance()) {
LOperand* operand = it.Next();
ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
LUnallocated::cast(operand)->IsUsedAtStart() ||
!LUnallocated::cast(operand)->HasRegisterPolicy());
}
for (TempIterator it(this); it.HasNext(); it.Advance()) {
@ -186,6 +184,9 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@ -802,6 +803,16 @@ LInstruction* LChunkBuilder::DoBit(Token::Value op,
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LArithmeticT* result = new LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->OperandAt(0)->representation().IsInteger32());
ASSERT(instr->OperandAt(1)->representation().IsInteger32());
@ -1021,7 +1032,7 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right));
UseRegisterAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
@ -1077,6 +1088,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else if (v->IsIsConstructCall()) {
return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
@ -1131,8 +1144,8 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LOperand* receiver = UseFixed(instr->receiver(), r0);
LOperand* length = UseRegisterAtStart(instr->length());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* length = UseFixed(instr->length(), r2);
LOperand* elements = UseFixed(instr->elements(), r3);
LApplyArguments* result = new LApplyArguments(function,
receiver,
length,
@ -1304,10 +1317,10 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
LOperand* value = UseFixed(instr->left(), r0);
LOperand* dividend = UseFixed(instr->left(), r0);
LOperand* divisor = UseFixed(instr->right(), r1);
return AssignEnvironment(AssignPointerMap(
DefineFixed(new LDivI(value, divisor), r0)));
DefineFixed(new LDivI(dividend, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@ -1417,7 +1430,7 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
ASSERT(instr->left()->representation().IsDouble());
@ -1478,6 +1491,15 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
}
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
@ -1500,6 +1522,12 @@ LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
}
LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LPixelArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LFixedArrayLength(array));
@ -1642,13 +1670,11 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
int32_t value = instr->Integer32Value();
return DefineAsRegister(new LConstantI(value));
return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
return DefineAsRegister(new LConstantD(value));
return DefineAsRegister(new LConstantD);
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
@ -1716,7 +1742,14 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineSameAsFirst(new LLoadElements(input));
return DefineAsRegister(new LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
HLoadPixelArrayExternalPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
@ -1731,6 +1764,19 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
}
LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
HLoadPixelArrayElement* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer =
UseRegisterAtStart(instr->external_pointer());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadPixelArrayElement* result =
new LLoadPixelArrayElement(external_pointer, key);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
@ -1832,8 +1878,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* object = UseFixed(instr->object(), r0);
LOperand* key = UseFixed(instr->key(), r1);
LDeleteProperty* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -1881,7 +1927,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseRegisterAtStart(instr->value()));
LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -1890,6 +1936,12 @@ LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
return DefineAsRegister(new LIsConstructCall());
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);

106
deps/v8/src/arm/lithium-arm.h

@ -41,7 +41,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
@ -95,6 +94,7 @@ class LCodeGen;
V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@ -123,6 +123,8 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@ -132,6 +134,7 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@ -153,6 +156,8 @@ class LCodeGen;
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
@ -735,6 +740,17 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@ -903,44 +919,30 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
class LConstant: public LTemplateInstruction<1, 0, 0> {
DECLARE_INSTRUCTION(Constant)
};
class LConstantI: public LConstant {
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantI(int32_t value) : value_(value) { }
int32_t value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
int32_t value_;
int32_t value() const { return hydrogen()->Integer32Value(); }
};
class LConstantD: public LConstant {
class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantD(double value) : value_(value) { }
double value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
double value_;
double value() const { return hydrogen()->DoubleValue(); }
};
class LConstantT: public LConstant {
class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantT(Handle<Object> value) : value_(value) { }
Handle<Object> value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
Handle<Object> value_;
Handle<Object> value() const { return hydrogen()->handle(); }
};
@ -990,6 +992,17 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LPixelArrayLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
};
class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
@ -1139,6 +1152,17 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadPixelArrayExternalPointer(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
"load-pixel-array-external-pointer")
};
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
@ -1154,6 +1178,22 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load-pixel-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
@ -1716,6 +1756,24 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {

208
deps/v8/src/arm/lithium-codegen-arm.cc

@ -647,7 +647,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
if (cc == kNoCondition) {
if (cc == al) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
@ -1188,8 +1188,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ tst(left, Operand(left));
__ b(ne, &done);
if (instr->InputAt(1)->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
DeoptimizeIf(kNoCondition, instr->environment());
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
DeoptimizeIf(al, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
@ -1322,6 +1322,13 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
}
void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
__ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
@ -1605,7 +1612,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToOperand(right));
__ cmp(ToRegister(left), ToRegister(right));
}
@ -1619,8 +1626,7 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to unordered to return false.
__ b(vs, &unordered);
@ -1647,8 +1653,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
@ -1891,14 +1896,42 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
}
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(scratch, result);
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Abort("DoHasCachedArrayIndex unimplemented.");
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
EmitBranch(true_block, false_block, eq);
}
@ -2180,12 +2213,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
@ -2196,7 +2229,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
Abort("DoCmpTAndBranch unimplemented.");
Token::Value op = instr->op();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// The compare stub expects the compare condition and the input operands
// reversed for GT and LTE.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
EmitBranch(true_block, false_block, condition);
}
@ -2342,17 +2389,20 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
ASSERT(instr->result()->Equals(instr->InputAt(0)));
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
__ ldr(reg, FieldMemOperand(reg, JSObject::kElementsOffset));
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done;
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
__ Check(eq, "Check for fast elements failed.");
@ -2361,6 +2411,14 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
}
void LCodeGen::DoLoadPixelArrayExternalPointer(
LLoadPixelArrayExternalPointer* instr) {
Register to_reg = ToRegister(instr->result());
Register from_reg = ToRegister(instr->InputAt(0));
__ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
@ -2397,6 +2455,16 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
}
void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
Register external_elements = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register result = ToRegister(instr->result());
// Load the result.
__ ldrb(result, MemOperand(external_elements, key));
}
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@ -2448,29 +2516,33 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
ASSERT(receiver.is(r0));
ASSERT(function.is(r1));
ASSERT(receiver.is(r0)); // Used for parameter count.
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
// If the receiver is null or undefined, we have to pass the
// global object as a receiver.
Label global_receiver, receiver_ok;
// If the receiver is null or undefined, we have to pass the global object
// as a receiver.
Label global_object, receiver_ok;
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
__ cmp(receiver, scratch);
__ b(eq, &global_receiver);
__ b(eq, &global_object);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ cmp(receiver, scratch);
__ b(ne, &receiver_ok);
__ bind(&global_receiver);
__ ldr(receiver, GlobalObjectOperand());
__ bind(&receiver_ok);
__ b(eq, &global_object);
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
// Deoptimize if the receiver is not a JS object.
__ tst(receiver, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
DeoptimizeIf(lo, instr->environment());
__ jmp(&receiver_ok);
Label invoke;
__ bind(&global_object);
__ ldr(receiver, GlobalObjectOperand());
__ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@ -2487,7 +2559,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Loop through the arguments pushing them onto the execution
// stack.
Label loop;
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ tst(length, Operand(length));
__ b(eq, &invoke);
@ -2510,6 +2582,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// by InvokeFunction.
v8::internal::ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@ -2899,7 +2972,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(info_->is_strict()
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@ -3778,6 +3853,55 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
Label done;
EmitIsConstructCall(result, scratch0());
__ b(eq, &true_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp1, scratch0());
EmitBranch(true_block, false_block, eq);
}
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
ASSERT(!temp1.is(temp2));
// Get the frame pointer for the calling frame.
__ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
__ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
__ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
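EmitIsConstructCall walks one or two frames up: take the caller's fp, skip an arguments adaptor frame if its context slot holds the adaptor marker, then compare the frame marker against CONSTRUCT. A sketch with the layout abstracted into a hypothetical Frame struct (field order and marker values are illustrative, not V8's actual layout):

#include <cstdint>

struct Frame {
  Frame* caller_fp;  // StandardFrameConstants::kCallerFPOffset
  intptr_t context;  // kContextOffset; adaptor frames store a marker here.
  intptr_t marker;   // kMarkerOffset
};

bool IsConstructCall(const Frame* fp,
                     intptr_t adaptor_marker,     // Smi(ARGUMENTS_ADAPTOR)
                     intptr_t construct_marker) { // Smi(CONSTRUCT)
  const Frame* caller = fp->caller_fp;
  if (caller->context == adaptor_marker) {
    caller = caller->caller_fp;  // Skip the arguments adaptor frame.
  }
  return caller->marker == construct_marker;
}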
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@ -3785,14 +3909,16 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
DeoptimizeIf(kNoCondition, instr->environment());
DeoptimizeIf(al, instr->environment());
}
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
__ Push(object, key);
Register strict = scratch0();
__ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
LEnvironment* env = instr->deoptimization_environment();
@ -3818,7 +3944,19 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
Abort("DoOsrEntry unimplemented.");
// This is a pseudo-instruction that ensures that the environment here is
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}

8
deps/v8/src/arm/lithium-codegen-arm.h

@ -129,6 +129,10 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
return info_->is_strict() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@ -264,6 +268,10 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

172
deps/v8/src/arm/macro-assembler-arm.cc

@ -714,7 +714,8 @@ int MacroAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
@ -736,12 +737,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
str(r3, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return. Callee-saved
// register r4 still holds argc.
// Tear down the exit frame, pop the arguments, and return.
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
mov(pc, lr);
if (argument_count.is_valid()) {
add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
}
}
@ -929,8 +930,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
Register scratch,
Label* fail) {
ASSERT(kNotStringTag != 0);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
@ -1005,6 +1006,117 @@ void MacroAssembler::PopTryHandler() {
}
void MacroAssembler::Throw(Register value) {
// r0 is expected to hold the exception.
if (!value.is(r0)) {
mov(r0, value);
}
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
mov(r3, Operand(ExternalReference(Top::k_handler_address)));
ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r2);
str(r2, MemOperand(r3));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
pop(pc);
}
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// r0 is expected to hold the exception.
if (!value.is(r0)) {
mov(r0, value);
}
// Drop sp to the top stack handler.
mov(r3, Operand(ExternalReference(Top::k_handler_address)));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
ldr(r2, MemOperand(sp, kStateOffset));
cmp(r2, Operand(StackHandler::ENTRY));
b(eq, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
ldr(sp, MemOperand(sp, kNextOffset));
jmp(&loop);
bind(&done);
// Set the top handler address to the next handler past the current
// ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r2);
str(r2, MemOperand(r3));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
str(r0, MemOperand(r2));
}
// Stack layout at this point. See also StackHandlerConstants.
// sp -> state (ENTRY)
// fp
// lr
// Discard handler state (r2 is not used) and restore frame pointer.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
pop(pc);
}
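The STATIC_ASSERTs in Throw and ThrowUncatchable pin down the handler layout these routines pop. Written out as a struct (kStateOffset is inferred as the one remaining slot; this is a reading aid, not V8's declaration):

#include <cstdint>

struct StackHandler {    // StackHandlerConstants::kSize == 4 * kPointerSize.
  StackHandler* next;    // kNextOffset  == 0
  intptr_t state;        // kStateOffset (inferred: 1 * kPointerSize)
  intptr_t fp;           // kFPOffset    == 2 * kPointerSize
  intptr_t pc;           // kPCOffset    == 3 * kPointerSize
};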
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@ -1150,7 +1262,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
add(scratch2, result, Operand(obj_size_reg));
add(scratch2, result, Operand(obj_size_reg), SetCC);
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
str(scratch2, MemOperand(topaddr));
@ -1229,10 +1342,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2));
add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
add(scratch2, result, Operand(object_size));
add(scratch2, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
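The SetCC/b(cs, gc_required) pair added in these hunks guards against the add itself wrapping around, which the following unsigned compare against the limit alone would miss. A small standalone illustration with invented values:

#include <cstdint>
#include <cassert>

int main() {
  uint32_t result = 0xFFFFFFF0u;            // hypothetical allocation top
  uint32_t object_size = 0x20u;
  uint32_t new_top = result + object_size;  // wraps around to 0x10
  bool carry = new_top < result;            // ARM C flag after the ADDS
  assert(carry);                            // b(cs, gc_required) would be taken
  return 0;
}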
@ -1552,9 +1666,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
// LeaveExitFrame expects unwind space to be in r4.
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
LeaveExitFrame(false);
LeaveExitFrame(false, r4);
mov(pc, lr);
bind(&promote_scheduled_exception);
MaybeObject* result = TryTailCallExternalReference(
@ -1771,6 +1886,13 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
}
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
Register src,
int num_least_bits) {
and_(dst, src, Operand((1 << num_least_bits) - 1));
}
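GetLeastBitsFromInt32 reduces to a single AND with the mask (1 << n) - 1. A quick standalone check of that identity, with invented values:

#include <cstdint>
#include <cassert>

int main() {
  int32_t src = 0x12345678;
  int num_least_bits = 4;
  int32_t dst = src & ((1 << num_least_bits) - 1);  // what and_() emits
  assert(dst == 0x8);  // low nibble of ...78
  return 0;
}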
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@ -2113,6 +2235,19 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
void MacroAssembler::AbortIfNotString(Register object) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is not a string");
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
pop(object);
Assert(lo, "Operand is not a string");
}
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
@ -2379,7 +2514,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
@ -2402,15 +2536,21 @@ CodePatcher::~CodePatcher() {
}
void CodePatcher::Emit(Instr x) {
masm()->emit(x);
void CodePatcher::Emit(Instr instr) {
masm()->emit(instr);
}
void CodePatcher::Emit(Address addr) {
masm()->emit(reinterpret_cast<Instr>(addr));
}
#endif // ENABLE_DEBUGGER_SUPPORT
void CodePatcher::EmitCondition(Condition cond) {
Instr instr = Assembler::instr_at(masm_.pc_);
instr = (instr & ~kCondMask) | cond;
masm_.emit(instr);
}
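EmitCondition works because ARM encodes the condition code in the top four bits of every instruction word. A standalone sketch, assuming the usual kCondMask value of 0xF0000000:

#include <cstdint>
#include <cassert>

int main() {
  const uint32_t kCondMask = 0xF0000000u;  // assumed: cond lives in bits 31-28
  uint32_t instr = 0xE3A00001u;            // MOV r0, #1 (AL condition, 0xE)
  uint32_t eq = 0x00000000u;               // EQ condition code
  instr = (instr & ~kCondMask) | eq;       // same rewrite as EmitCondition
  assert((instr >> 28) == 0x0u);           // now MOVEQ r0, #1
  return 0;
}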
} } // namespace v8::internal

27
deps/v8/src/arm/macro-assembler-arm.h

@ -45,6 +45,12 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
}
static inline Operand SmiUntagOperand(Register object) {
return Operand(object, ASR, kSmiTagSize);
}
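SmiUntagOperand encodes the untagging as an arithmetic shift right by kSmiTagSize. A standalone illustration, assuming the 32-bit Smi layout (value in the upper 31 bits, tag bit zero):

#include <cstdint>
#include <cassert>

int main() {
  const int kSmiTagSize = 1;                 // assumed 32-bit Smi layout
  int32_t tagged = 42 << kSmiTagSize;        // Smi representation of 42
  int32_t untagged = tagged >> kSmiTagSize;  // the ASR SmiUntagOperand encodes
  assert(untagged == 42);
  return 0;
}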
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
@ -291,7 +297,9 @@ class MacroAssembler: public Assembler {
void EnterExitFrame(bool save_doubles, int stack_space = 0);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(bool save_doubles);
// Expects the number of values to remove, pushed prior to the exit frame,
// to be passed in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@ -365,6 +373,13 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopTryHandler();
// Passes the thrown value (in r0) to the handler at the top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
void ThrowUncatchable(UncatchableExceptionType type, Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@ -558,6 +573,7 @@ class MacroAssembler: public Assembler {
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
// Uses VFP instructions to convert a Smi to a double.
void IntegerToDoubleConversionWithVFP3(Register inReg,
@ -784,6 +800,9 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
// Abort execution if argument is not a string. Used in debug code.
void AbortIfNotString(Register object);
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
@ -886,11 +905,15 @@ class CodePatcher {
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
void Emit(Instr x);
void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
// Emit the condition part of an instruction leaving the rest of the current
// instruction unchanged.
void EmitCondition(Condition cond);
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.

71
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -57,48 +57,57 @@ namespace internal {
* - r13/sp : points to tip of C stack.
*
* The remaining registers are free for computations.
*
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
* - backup of registers r4..r11
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - start index (character index of start)
* --- frame pointer ----
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
* - At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
* --- sp ---
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
* backtracking stack).
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
* - fp[32] return address (lr).
* - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
* --- sp ---
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
* character of the string). The remaining registers start out as garbage.
*
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* int start_index,
* Address start,
* Address end,
* Address secondary_return_address, // Only used by native call.
* int* capture_output_array,
* bool at_start,
* byte* stack_area_base,
* bool direct_call)
* bool direct_call = false)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc).
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in arm/simulator-arm.h.
* When calling as a non-direct call (i.e., from C++ code), the return address
* area is overwritten with the LR register by the RegExp code. When doing a
* direct call from generated code, the return address is placed there by
* the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@ -598,16 +607,17 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
// Push Link register.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
// Store link register in existing stack-cell.
// Order here should correspond to order of offset constants in header file.
RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
__ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
// Set frame pointer just above the arguments.
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
@ -764,10 +774,9 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
Label grow_failed;
// Call GrowStack(backtrack_stackpointer())
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 2;
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());

3
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -122,8 +122,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
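These offset constants can be cross-checked against the fp[...] frame-layout comment added in regexp-macro-assembler-arm.cc above. A standalone sketch, assuming kFramePointer == 0 and kPointerSize == 4:

#include <cassert>

int main() {
  const int kPointerSize = 4;
  const int kFramePointer = 0;                           // assumed frame base
  const int kStoredRegisters = kFramePointer;            // r4..r10 + fp
  const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;      // fp[32]
  const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;   // fp[36]
  const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;  // fp[40]
  const int kStackHighEnd = kRegisterOutput + kPointerSize;            // fp[44]
  const int kDirectCall = kStackHighEnd + kPointerSize;                // fp[48]
  assert(kDirectCall == 48);
  return 0;
}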

15
deps/v8/src/arm/simulator-arm.h

@ -48,10 +48,16 @@ namespace internal {
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
(entry(p0, p1, p2, p3, p4, p5, p6))
(FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
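The native macro boils down to casting a raw code-entry address to a function pointer and calling it with NULL in the dummy secondary-return-address slot. A simplified standalone sketch of that pattern; the types here are invented stand-ins, not V8's (the real signature is the arm_regexp_matcher typedef above):

#include <cstddef>

// Invented, simplified matcher signature for illustration only.
typedef int (*matcher_fn)(const char* input, int start_index, int* captures,
                          void* dummy_return_slot);

static int fake_generated_code(const char* input, int start_index,
                               int* captures, void* dummy_return_slot) {
  captures[0] = start_index;  // pretend the match begins at start_index
  return 1;                   // "success"
}

int main() {
  // Cast the raw entry address to the matcher type and call it, passing
  // NULL for the dummy slot -- the shape of CALL_GENERATED_REGEXP_CODE.
  void* entry = reinterpret_cast<void*>(&fake_generated_code);
  matcher_fn matcher = reinterpret_cast<matcher_fn>(entry);
  int captures[2] = { 0, 0 };
  return matcher("abc", 1, captures, NULL) == 1 ? 0 : 1;
}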
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
@ -362,8 +368,7 @@ class Simulator {
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == \

41
deps/v8/src/arm/stub-cache-arm.cc

@ -3259,6 +3259,47 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
JSObject* receiver) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- r3 : scratch
// -- r4 : scratch
// -- r5 : scratch
// -- r6 : scratch
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the map matches.
__ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
GenerateFastPixelArrayStore(masm(),
r2,
r1,
r0,
r3,
r4,
r5,
r6,
true,
true,
&miss,
&miss,
NULL,
&miss);
__ bind(&miss);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ----------- S t a t e -------------
// -- r0 : argc

13
deps/v8/src/arm/virtual-frame-arm.cc

@ -329,18 +329,25 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
}
void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
void VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
PopToR0();
RelocInfo::Mode mode;
if (is_contextual) {
SpillAll();
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
EmitPop(r1);
SpillAll();
mode = RelocInfo::CODE_TARGET;
}
__ mov(r2, Operand(name));
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
CallCodeObject(ic, mode, 0);
}

3
deps/v8/src/arm/virtual-frame-arm.h

@ -294,7 +294,8 @@ class VirtualFrame : public ZoneObject {
// Call store IC. If the load is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in r0.
void CallStoreIC(Handle<String> name, bool is_contextual);
void CallStoreIC(Handle<String> name, bool is_contextual,
StrictModeFlag strict_mode);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in r0.

10
deps/v8/src/array.js

@ -161,15 +161,7 @@ function Join(array, length, separator, convert) {
var result = %_FastAsciiArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
var length2 = (length << 1) - 1;
var j = length2;
var i = length;
elements[--j] = elements[--i];
while (i > 0) {
elements[--j] = separator;
elements[--j] = elements[--i];
}
return %StringBuilderConcat(elements, length2, '');
return %StringBuilderJoin(elements, length, separator);
} finally {
// Make sure to remove the last element of the visited array no
// matter what happens.

2
deps/v8/src/assembler.cc

@ -68,7 +68,7 @@ const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
// Implementation of Label

8
deps/v8/src/assembler.h

@ -178,10 +178,16 @@ class RelocInfo BASE_EMBEDDED {
// invalid/uninitialized position value.
static const int kNoPosition = -1;
// This string is used to add padding comments to the reloc info in cases
// where we are not sure we will have enough space for patching during
// lazy deoptimization. This is the case if we have indirect calls for which
// we do not normally record relocation info.
static const char* kFillerCommentString;
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
CODE_TARGET_CONTEXT, // Code target used for contextual loads.
CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,

4
deps/v8/src/ast.cc

@ -618,7 +618,9 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
cell_ = Handle<JSGlobalPropertyCell>::null();
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() && lookup.type() == NORMAL) {
if (lookup.IsProperty() &&
lookup.type() == NORMAL &&
lookup.holder() == *global) {
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));

11
deps/v8/src/bignum.cc

@ -67,7 +67,7 @@ void Bignum::AssignUInt64(uint64_t value) {
int needed_bigits = kUInt64Size / kBigitSize + 1;
EnsureCapacity(needed_bigits);
for (int i = 0; i < needed_bigits; ++i) {
bigits_[i] = value & kBigitMask;
bigits_[i] = static_cast<Chunk>(value & kBigitMask);
value = value >> kBigitSize;
}
used_digits_ = needed_bigits;
@ -266,7 +266,7 @@ void Bignum::MultiplyByUInt32(uint32_t factor) {
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
used_digits_++;
carry >>= kBigitSize;
}
@ -287,13 +287,13 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
uint64_t product_low = low * bigits_[i];
uint64_t product_high = high * bigits_[i];
uint64_t tmp = (carry & kBigitMask) + product_low;
bigits_[i] = tmp & kBigitMask;
bigits_[i] = static_cast<Chunk>(tmp & kBigitMask);
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
(product_high << (32 - kBigitSize));
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
used_digits_++;
carry >>= kBigitSize;
}
@ -748,7 +748,8 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
for (int i = 0; i < other.used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
DoubleChunk remove = borrow + product;
Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
Chunk difference =
bigits_[i + exponent_diff] - static_cast<Chunk>(remove & kBigitMask);
bigits_[i + exponent_diff] = difference & kBigitMask;
borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
(remove >> kBigitSize));

51
deps/v8/src/bootstrapper.cc

@ -349,7 +349,7 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
prototype,
call_code,
is_ecma_native);
SetProperty(target, symbol, function, DONT_ENUM);
SetLocalPropertyNoThrow(target, symbol, function, DONT_ENUM);
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
@ -580,8 +580,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
SetProperty(prototype, Factory::constructor_symbol(),
Top::object_function(), NONE);
SetLocalPropertyNoThrow(
prototype, Factory::constructor_symbol(), Top::object_function(), NONE);
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
@ -683,7 +683,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
global_context()->set_security_token(*inner_global);
Handle<String> object_name = Handle<String>(Heap::Object_symbol());
SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM);
SetLocalPropertyNoThrow(inner_global, object_name,
Top::object_function(), DONT_ENUM);
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@ -851,7 +852,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetProperty(global, name, json_object, DONT_ENUM);
SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
}
@ -880,12 +881,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
global_context()->set_arguments_boilerplate(*result);
// Note: callee must be added as the first property and
// length must be added as the second property.
SetProperty(result, Factory::callee_symbol(),
Factory::undefined_value(),
DONT_ENUM);
SetProperty(result, Factory::length_symbol(),
Factory::undefined_value(),
DONT_ENUM);
SetLocalPropertyNoThrow(result, Factory::callee_symbol(),
Factory::undefined_value(),
DONT_ENUM);
SetLocalPropertyNoThrow(result, Factory::length_symbol(),
Factory::undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
@ -1085,10 +1086,8 @@ bool Genesis::InstallNatives() {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
SetProperty(builtins,
global_symbol,
Handle<Object>(global_context()->global()),
attributes);
Handle<Object> global_obj(global_context()->global());
SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
// Setup the reference from the global object to the builtins object.
JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
@ -1480,17 +1479,17 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
Factory::LookupAsciiSymbol(FLAG_expose_natives_as);
SetProperty(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
SetLocalPropertyNoThrow(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
SetProperty(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
NONE);
SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
NONE);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -1507,8 +1506,8 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
Handle<String> debug_string =
Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
SetProperty(js_global, debug_string,
Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
Handle<Object> global_proxy(Debug::debug_context()->global_proxy());
SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
}
#endif
}
@ -1679,7 +1678,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
SetProperty(to, key, value, details.attributes());
SetLocalPropertyNoThrow(to, key, value, details.attributes());
break;
}
case CONSTANT_FUNCTION: {
@ -1687,7 +1686,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(descs->GetConstantFunction(i));
SetProperty(to, key, fun, details.attributes());
SetLocalPropertyNoThrow(to, key, fun, details.attributes());
break;
}
case CALLBACKS: {
@ -1737,7 +1736,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
}
PropertyDetails details = properties->DetailsAt(i);
SetProperty(to, key, value, details.attributes());
SetLocalPropertyNoThrow(to, key, value, details.attributes());
}
}
}

45
deps/v8/src/builtins.cc

@ -368,7 +368,9 @@ static bool ArrayPrototypeHasNoElements(Context* global_context,
array_proto = JSObject::cast(array_proto->GetPrototype());
ASSERT(array_proto->elements() == Heap::empty_fixed_array());
// Object.prototype
array_proto = JSObject::cast(array_proto->GetPrototype());
Object* proto = array_proto->GetPrototype();
if (proto == Heap::null_value()) return false;
array_proto = JSObject::cast(proto);
if (array_proto != global_context->initial_object_prototype()) return false;
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
ASSERT(array_proto->GetPrototype()->IsNull());
@ -1305,6 +1307,11 @@ static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
}
static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@ -1315,8 +1322,18 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
}
static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
StoreIC::GenerateNormal(masm);
}
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict);
}
static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict);
}
@ -1325,11 +1342,21 @@ static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
}
static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
StoreIC::GenerateArrayLength(masm);
}
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm);
}
static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm);
}
@ -1442,13 +1469,13 @@ void Builtins::Setup(bool create_heap_objects) {
extra_args \
},
#define DEF_FUNCTION_PTR_A(name, kind, state) \
{ FUNCTION_ADDR(Generate_##name), \
NULL, \
#name, \
name, \
Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state), \
NO_EXTRA_ARGUMENTS \
#define DEF_FUNCTION_PTR_A(name, kind, state, extra) \
{ FUNCTION_ADDR(Generate_##name), \
NULL, \
#name, \
name, \
Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state, extra), \
NO_EXTRA_ARGUMENTS \
},
// Define array of pointers to generators and C builtin functions.

170
deps/v8/src/builtins.h

@ -63,73 +63,135 @@ enum BuiltinExtraArguments {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(LazyCompile, BUILTIN, UNINITIALIZED) \
V(LazyRecompile, BUILTIN, UNINITIALIZED) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED) \
V(NotifyOSR, BUILTIN, UNINITIALIZED) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyCompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC) \
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
StoreIC::kStoreICStrict) \
V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
StoreIC::kStoreICStrict) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
StoreIC::kStoreICStrict) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
StoreIC::kStoreICStrict) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
StoreIC::kStoreICStrict) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED) \
V(FunctionApply, BUILTIN, UNINITIALIZED) \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED) \
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED)
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK)
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@ -152,7 +214,7 @@ enum BuiltinExtraArguments {
V(SHL, 1) \
V(SAR, 1) \
V(SHR, 1) \
V(DELETE, 1) \
V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
V(GET_KEYS, 0) \
@ -186,7 +248,7 @@ class Builtins : public AllStatic {
enum Name {
#define DEF_ENUM_C(name, ignore) name,
#define DEF_ENUM_A(name, kind, state) name,
#define DEF_ENUM_A(name, kind, state, extra) name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)

4
deps/v8/src/code-stubs.cc

@ -32,7 +32,6 @@
#include "factory.h"
#include "gdb-jit.h"
#include "macro-assembler.h"
#include "oprofile-agent.h"
namespace v8 {
namespace internal {
@ -63,9 +62,6 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
OPROFILE(CreateNativeCodeRegion(GetName(),
code->instruction_start(),
code->instruction_size()));
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
Counters::total_stubs_code_size.Increment(code->instruction_size());

3
deps/v8/src/code-stubs.h

@ -86,9 +86,6 @@ namespace internal {
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
// Types of uncatchable exceptions.
enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };

1
deps/v8/src/codegen.cc

@ -31,7 +31,6 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
#include "register-allocator-inl.h"
#include "rewriter.h"

25
deps/v8/src/compiler.cc

@ -39,7 +39,6 @@
#include "hydrogen.h"
#include "lithium.h"
#include "liveedit.h"
#include "oprofile-agent.h"
#include "parser.h"
#include "rewriter.h"
#include "runtime-profiler.h"
@ -289,6 +288,11 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
if (Top::has_pending_exception()) {
info->SetCode(Handle<Code>::null());
return false;
}
if (graph != NULL && FLAG_build_lithium) {
Handle<Code> code = graph->Compile();
if (!code.is_null()) {
@ -419,9 +423,6 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
String::cast(script->name())));
OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
info->code()->instruction_start(),
info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code()));
@ -432,9 +433,6 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
""));
OPROFILE(CreateNativeCodeRegion(info->is_eval() ? "Eval" : "Script",
info->code()->instruction_start(),
info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
@ -608,7 +606,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// Compile the code.
if (!MakeCode(info)) {
Top::StackOverflow();
if (!Top::has_pending_exception()) {
Top::StackOverflow();
}
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
@ -783,7 +783,6 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (Logger::is_logging() ||
OProfileAgent::is_enabled() ||
CpuProfiler::is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
@ -795,18 +794,10 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*name,
String::cast(script->name()),
line_num));
OPROFILE(CreateNativeCodeRegion(*name,
String::cast(script->name()),
line_num,
code->instruction_start(),
code->instruction_size()));
} else {
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*name));
OPROFILE(CreateNativeCodeRegion(*name,
code->instruction_start(),
code->instruction_size()));
}
}

4
deps/v8/src/compiler.h

@ -71,7 +71,6 @@ class CompilationInfo BASE_EMBEDDED {
flags_ |= IsGlobal::encode(true);
}
void MarkAsStrict() {
ASSERT(!is_lazy());
flags_ |= IsStrict::encode(true);
}
StrictModeFlag StrictMode() {
@ -153,6 +152,9 @@ class CompilationInfo BASE_EMBEDDED {
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
if (!shared_info_.is_null() && shared_info_->strict_mode()) {
MarkAsStrict();
}
}
void SetMode(Mode mode) {

2
deps/v8/src/d8.cc

@ -127,11 +127,13 @@ bool Shell::ExecuteString(Handle<String> source,
} else {
Handle<Value> result = script->Run();
if (result.IsEmpty()) {
ASSERT(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions && !i::FLAG_debugger)
ReportException(&try_catch);
return false;
} else {
ASSERT(!try_catch.HasCaught());
if (print_result && !result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.

7
deps/v8/src/date.js

@ -81,12 +81,7 @@ function TimeFromYear(year) {
function InLeapYear(time) {
return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
return DAY(time) - DayFromYear(YearFromTime(time));
return DaysInYear(YearFromTime(time)) - 365; // Returns 1 or 0.
}

4
deps/v8/src/debug.cc

@ -835,7 +835,9 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = Factory::LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
SetProperty(global, key, Handle<Object>(global->builtins()), NONE);
RETURN_IF_EMPTY_HANDLE_VALUE(
SetProperty(global, key, Handle<Object>(global->builtins()), NONE),
false);
// Compile the JavaScript for the debugger in the debugger context.
Debugger::set_compiling_natives(true);

14
deps/v8/src/deoptimizer.cc

@ -663,7 +663,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::REGISTER: {
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
PrintF(" %s <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
input_value,
*input_offset);
@ -690,7 +690,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
return false;
}
if (FLAG_trace_osr) {
PrintF(" %s <- %d (int32) ; [esp + %d]\n",
PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
int32_value,
*input_offset);
@ -706,7 +706,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int output_reg = iterator->Next();
double double_value = input_object->Number();
if (FLAG_trace_osr) {
PrintF(" %s <- %g (double) ; [esp + %d]\n",
PrintF(" %s <- %g (double) ; [sp + %d]\n",
DoubleRegister::AllocationIndexToString(output_reg),
double_value,
*input_offset);
@ -720,7 +720,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
output_offset,
input_value,
*input_offset);
@ -749,7 +749,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
return false;
}
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- %d (int32) ; [esp + %d]\n",
PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
int32_value,
*input_offset);
@ -773,12 +773,12 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int32_t lower = static_cast<int32_t>(int_value);
int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
output_offset + kUpperOffset,
upper,
double_value,
*input_offset);
PrintF(" [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
output_offset + kLowerOffset,
lower,
double_value,

1
deps/v8/src/execution.cc

@ -403,6 +403,7 @@ void StackGuard::ThreadLocal::Initialize() {
if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
const uintptr_t kLimitSize = FLAG_stack_size * KB;
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
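The limit computation above uses the address of a local variable as an approximation of the current stack top. A standalone sketch of the same trick; the 512 KB figure is an arbitrary example standing in for FLAG_stack_size * KB:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kLimitSize = 512 * 1024;  // example stack budget
  uintptr_t probe = 0;                      // lives on the current stack
  uintptr_t limit = reinterpret_cast<uintptr_t>(&probe) - kLimitSize;
  // Stacks grow downward on V8's targets, so the guard limit sits
  // kLimitSize bytes below the current top.
  std::printf("guard limit at %p\n", reinterpret_cast<void*>(limit));
  return 0;
}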

2
deps/v8/src/execution.h

@ -243,8 +243,6 @@ class StackGuard : public AllStatic {
static void EnableInterrupts();
static void DisableInterrupts();
static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);

9
deps/v8/src/factory.cc

@ -334,6 +334,11 @@ Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
}
Handle<Map> Factory::GetPixelArrayElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->GetPixelArrayElementsMap(), Map);
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
}
@ -580,7 +585,9 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
// Set function.prototype and give the prototype a constructor
// property that refers to the function.
SetPrototypeProperty(function, prototype);
SetProperty(prototype, Factory::constructor_symbol(), function, DONT_ENUM);
// Currently safe because it is only invoked from Genesis.
SetLocalPropertyNoThrow(
prototype, Factory::constructor_symbol(), function, DONT_ENUM);
return function;
}

2
deps/v8/src/factory.h

@ -196,6 +196,8 @@ class Factory : public AllStatic {
static Handle<Map> GetSlowElementsMap(Handle<Map> map);
static Handle<Map> GetPixelArrayElementsMap(Handle<Map> map);
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (e.g. literals) are pretenured by the parser.

10
deps/v8/src/flag-definitions.h

@ -134,11 +134,7 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
#ifdef V8_TARGET_ARCH_IA32
DEFINE_bool(use_osr, true, "use on-stack replacement")
#else
DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@ -231,6 +227,10 @@ DEFINE_bool(debugger_auto_break, true,
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
// execution.cc
DEFINE_int(stack_size, kPointerSize * 128,
"default size of stack region v8 is allowed to use (in KkBytes)")
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
"maximum length of function source code printed in a stack trace.")
@ -374,6 +374,7 @@ DEFINE_bool(debug_script_collected_events, true,
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
//
// Debug only flags
@ -493,7 +494,6 @@ DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
//

2
deps/v8/src/full-codegen.cc

@ -913,7 +913,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
PrepareForBailoutForId(stmt->EntryId(), TOS_REG);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
__ bind(nested_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);

6
deps/v8/src/full-codegen.h

@ -531,8 +531,9 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_strict() { return function()->strict_mode(); }
StrictModeFlag strict_mode_flag() {
return function()->strict_mode() ? kStrictMode : kNonStrictMode;
return is_strict() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
@ -544,7 +545,8 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
// Calling an IC stub with a patch site. Passing NULL for patch_site
// indicates no inlined smi code and emits a nop after the IC call.
// or a non-NULL patch_site that is not activated indicates no inlined smi
// code and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
// Set fields in the stack frame. Offsets are the frame pointer relative

121
deps/v8/src/gdb-jit.cc

@ -395,7 +395,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if defined(V8_TARGET_ARCH_IA32)
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif defined(V8_TARGET_ARCH_X64)
@ -413,6 +413,10 @@ class ELF BASE_EMBEDDED {
// System V ABI, AMD64 Supplement
// http://www.x86-64.org/documentation/abi.pdf
header->machine = 62;
#elif defined(V8_TARGET_ARCH_ARM)
// Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
// infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
header->machine = 40;
#else
#error Unsupported target architecture.
#endif
@ -503,8 +507,7 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
#if defined(V8_TARGET_ARCH_IA32)
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@ -857,14 +860,20 @@ class DebugLineSection : public ELFSection {
Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
// Used for special opcodes
const int8_t line_base = 1;
const uint8_t line_range = 7;
const int8_t max_line_incr = (line_base + line_range - 1);
const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1;
w->Write<uint16_t>(2); // Field version.
Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
uintptr_t prologue_start = w->position();
w->Write<uint8_t>(1); // Field minimum_instruction_length.
w->Write<uint8_t>(1); // Field default_is_stmt.
w->Write<int8_t>(0); // Field line_base.
w->Write<uint8_t>(2); // Field line_range.
w->Write<uint8_t>(DW_LNS_NEGATE_STMT + 1); // Field opcode_base.
w->Write<int8_t>(line_base); // Field line_base.
w->Write<uint8_t>(line_range); // Field line_range.
w->Write<uint8_t>(opcode_base); // Field opcode_base.
w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
@ -881,6 +890,7 @@ class DebugLineSection : public ELFSection {
WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
w->Write<intptr_t>(desc_->CodeStart());
w->Write<uint8_t>(DW_LNS_COPY);
intptr_t pc = 0;
intptr_t line = 1;
@ -888,29 +898,66 @@ class DebugLineSection : public ELFSection {
List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
pc_info->Sort(&ComparePCInfo);
for (int i = 0; i < pc_info->length(); i++) {
int pc_info_length = pc_info->length();
for (int i = 0; i < pc_info_length; i++) {
GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
uintptr_t pc_diff = info->pc_ - pc;
ASSERT(info->pc_ >= pc);
if (pc_diff != 0) {
w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
w->WriteSLEB128(pc_diff);
pc += pc_diff;
}
intptr_t line_diff = desc_->GetScriptLineNumber(info->pos_) - line;
if (line_diff != 0) {
w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
w->WriteSLEB128(line_diff);
line += line_diff;
// Reduce bloating in the debug line table by removing duplicate line
// entries (per DWARF2 standard).
intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
if (new_line == line) {
continue;
}
if (is_statement != info->is_statement_) {
// Mark statement boundaries. For a better debugging experience, mark
// the last pc address in the function as a statement (e.g. "}"), so that
// a user can see the result of the last line executed in the function,
// should control reach the end.
if ((i+1) == pc_info_length) {
if (!is_statement) {
w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
}
} else if (is_statement != info->is_statement_) {
w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
is_statement = !is_statement;
}
if (pc_diff != 0 || i == 0) {
// Generate special opcodes, if possible. This results in more compact
// debug line tables. See the DWARF 2.0 standard to learn more about
// special opcodes.
uintptr_t pc_diff = info->pc_ - pc;
intptr_t line_diff = new_line - line;
// Compute special opcode (see DWARF 2.0 standard)
intptr_t special_opcode = (line_diff - line_base) +
(line_range * pc_diff) + opcode_base;
// If special_opcode is less than or equal to 255, it can be used as a
// special opcode. If line_diff is larger than the max line increment
// allowed for a special opcode, or if line_diff is less than the minimum
// line that can be added to the line register (i.e. line_base), then
// special_opcode can't be used.
if ((special_opcode >= opcode_base) && (special_opcode <= 255) &&
(line_diff <= max_line_incr) && (line_diff >= line_base)) {
w->Write<uint8_t>(special_opcode);
} else {
w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
w->WriteSLEB128(pc_diff);
w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
w->WriteSLEB128(line_diff);
w->Write<uint8_t>(DW_LNS_COPY);
}
// Increment the pc and line operands.
pc += pc_diff;
line += line_diff;
}
// Advance the pc to the end of the routine, since the end sequence opcode
// requires this.
w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
w->WriteSLEB128(desc_->CodeSize() - pc);
WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
total_length.set(static_cast<uint32_t>(w->position() - start));
return true;
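As a concrete check of the special-opcode formula above, using the constants set earlier in WriteBody (line_base = 1, line_range = 7) and assuming the standard DWARF2 value DW_LNS_NEGATE_STMT == 6:

#include <cassert>

int main() {
  const int line_base = 1;    // as written to the prologue above
  const int line_range = 7;
  const int opcode_base = 7;  // DW_LNS_NEGATE_STMT (6) + 1
  // Example: advance the line register by 3 and the pc by 4 units.
  int line_diff = 3;
  int pc_diff = 4;
  int special_opcode =
      (line_diff - line_base) + (line_range * pc_diff) + opcode_base;
  assert(special_opcode == 37);  // <= 255, so a single opcode byte suffices
  return 0;
}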
@ -1237,6 +1284,20 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry) {
#if defined(DEBUG) && !defined(WIN32)
static int file_num = 0;
if (FLAG_gdbjit_dump) {
static const int kMaxFileNameSize = 64;
static const char* kElfFilePrefix = "/tmp/elfdump";
static const char* kObjFileExt = ".o";
char file_name[kMaxFileNameSize];
OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
kElfFilePrefix, file_num++, kObjFileExt);
WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
}
#endif
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != NULL) entry->next_->prev_ = entry;
__jit_debug_descriptor.first_entry_ =
@ -1294,7 +1355,13 @@ static bool SameCodeObjects(void* key1, void* key2) {
}
static HashMap entries(&SameCodeObjects);
static HashMap* GetEntries() {
static HashMap* entries = NULL;
if (entries == NULL) {
entries = new HashMap(&SameCodeObjects);
}
return entries;
}
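GetEntries() replaces the file-scope HashMap with a lazily constructed one, sidestepping static-initialization-order issues. A minimal standalone version of the pattern, with std::map standing in for V8's HashMap purely for illustration:

#include <cstddef>
#include <map>

static std::map<void*, void*>* GetTable() {
  static std::map<void*, void*>* table = NULL;
  if (table == NULL) {
    table = new std::map<void*, void*>();  // deliberately leaked singleton
  }
  return table;
}

int main() {
  (*GetTable())[NULL] = NULL;  // first call constructs the table
  return GetTable()->size() == 1 ? 0 : 1;
}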
static uint32_t HashForCodeObject(Code* code) {
@ -1398,7 +1465,7 @@ void GDBJITInterface::AddCode(const char* name,
if (!FLAG_gdbjit) return;
AssertNoAllocation no_gc;
HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
@ -1411,7 +1478,7 @@ void GDBJITInterface::AddCode(const char* name,
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
entries.Remove(code, HashForCodeObject(code));
GetEntries()->Remove(code, HashForCodeObject(code));
return;
}
@ -1464,7 +1531,9 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), false);
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
if (e == NULL) return;
if (IsLineInfoTagged(e->value)) {
@ -1475,14 +1544,14 @@ void GDBJITInterface::RemoveCode(Code* code) {
DestroyCodeEntry(entry);
}
e->value = NULL;
entries.Remove(code, HashForCodeObject(code));
GetEntries()->Remove(code, HashForCodeObject(code));
}
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
e->value = TagLineInfo(line_info);
}

12
deps/v8/src/handles.cc

@ -290,6 +290,17 @@ Handle<Object> SetLocalPropertyIgnoreAttributes(
}
void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
ASSERT(!Top::has_pending_exception());
CHECK(!SetLocalPropertyIgnoreAttributes(
object, key, value, attributes).is_null());
CHECK(!Top::has_pending_exception());
}
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@ -808,6 +819,7 @@ static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
ASSERT(!Top::has_pending_exception());
bool result = Compiler::CompileLazy(info);
ASSERT(result != Top::has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();

7
deps/v8/src/handles.h

@ -223,6 +223,13 @@ Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<Object> value,
PropertyAttributes attributes);
// Used to set local properties on the object we totally control
// and which therefore has no accessors or the like.
void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes = NONE);
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,

2
deps/v8/src/heap.cc

@ -4128,7 +4128,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
for (Address a = new_space_.FromSpaceLow();
a < new_space_.FromSpaceHigh();
a += kPointerSize) {

1
deps/v8/src/heap.h

@ -184,6 +184,7 @@ namespace internal {
V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \
V(KeyedStorePixelArray_symbol, "KeyedStorePixelArray") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \

186
deps/v8/src/hydrogen-instructions.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -281,6 +281,33 @@ void HValue::SetOperandAt(int index, HValue* value) {
}
void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 2) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 3) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 2) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HValue::ReplaceAndDelete(HValue* other) {
ReplaceValue(other);
Delete();
@ -438,9 +465,16 @@ void HInstruction::PrintTo(StringStream* stream) const {
void HInstruction::Unlink() {
ASSERT(IsLinked());
ASSERT(!IsControlInstruction()); // Must never move control instructions.
ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these.
ASSERT(previous_ != NULL);
previous_->next_ = next_;
if (next_ == NULL) {
ASSERT(block()->last() == this);
block()->set_last(previous_);
} else {
next_->previous_ = previous_;
}
clear_block();
if (previous_ != NULL) previous_->next_ = next_;
if (next_ != NULL) next_->previous_ = previous_;
}
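
HInstruction::Unlink() now has to keep the owning block's cached last() pointer in sync: if the unlinked instruction was the block's last, the cache moves back to previous_. The same invariant on a plain doubly-linked list with a cached tail (illustrative types, not V8's):

    // Removing a node from a doubly-linked list that caches its tail.
    struct Node { Node* prev; Node* next; };
    struct List { Node* head; Node* tail; };

    void Unlink(List* list, Node* n) {
      if (n->prev != NULL) n->prev->next = n->next;
      if (n->next == NULL) {
        list->tail = n->prev;      // n was the tail: move the cache back
      } else {
        n->next->prev = n->prev;
      }
      if (list->head == n) list->head = n->next;
    }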
@ -527,26 +561,64 @@ void HInstruction::Verify() {
#endif
HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
for (int i = 0; i < count; ++i) arguments_[i] = NULL;
set_representation(Representation::Tagged());
SetAllSideEffects();
void HCall::PrintDataTo(StringStream* stream) const {
stream->Add("#%d", argument_count());
}
void HCall::PrintDataTo(StringStream* stream) const {
stream->Add("(");
for (int i = 0; i < arguments_.length(); ++i) {
if (i != 0) stream->Add(", ");
arguments_.at(i)->PrintNameTo(stream);
void HUnaryCall::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
stream->Add(" ");
HCall::PrintDataTo(stream);
}
void HBinaryCall::PrintDataTo(StringStream* stream) const {
first()->PrintNameTo(stream);
stream->Add(" ");
second()->PrintNameTo(stream);
stream->Add(" ");
HCall::PrintDataTo(stream);
}
void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
if (IsApplyFunction()) {
stream->Add("optimized apply ");
} else {
stream->Add("%o ", function()->shared()->DebugName());
}
stream->Add(")");
HCall::PrintDataTo(stream);
}
void HCallNamed::PrintDataTo(StringStream* stream) const {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
void HCallGlobal::PrintDataTo(StringStream* stream) const {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
void HCallKnownGlobal::PrintDataTo(StringStream* stream) const {
stream->Add("o ", target()->shared()->DebugName());
HCall::PrintDataTo(stream);
}
void HCallRuntime::PrintDataTo(StringStream* stream) const {
stream->Add("%o ", *name());
HCall::PrintDataTo(stream);
}
void HClassOfTest::PrintDataTo(StringStream* stream) const {
stream->Add("class_of_test(");
value()->PrintTo(stream);
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
}
@ -560,22 +632,6 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
}
void HCall::SetArgumentAt(int index, HPushArgument* push_argument) {
push_argument->set_argument_index(index);
SetOperandAt(index, push_argument);
}
void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
if (IsApplyFunction()) {
stream->Add("SPECIAL function: apply");
} else {
stream->Add("%s", *(function()->shared()->DebugName()->ToCString()));
}
HCall::PrintDataTo(stream);
}
void HControlInstruction::PrintDataTo(StringStream* stream) const {
if (FirstSuccessor() != NULL) {
int first_id = FirstSuccessor()->block_id();
@ -663,14 +719,6 @@ void HTypeofIs::PrintDataTo(StringStream* stream) const {
}
void HPushArgument::PrintDataTo(StringStream* stream) const {
HUnaryOperation::PrintDataTo(stream);
if (argument_index() != -1) {
stream->Add(" [%d]", argument_index_);
}
}
void HChange::PrintDataTo(StringStream* stream) const {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
@ -699,42 +747,19 @@ void HCheckFunction::PrintDataTo(StringStream* stream) const {
}
void HCallKeyed::PrintDataTo(StringStream* stream) const {
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("](");
for (int i = 1; i < arguments_.length(); ++i) {
if (i != 1) stream->Add(", ");
arguments_.at(i)->PrintNameTo(stream);
}
stream->Add(")");
}
void HCallNamed::PrintDataTo(StringStream* stream) const {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s ", *name_string);
HCall::PrintDataTo(stream);
}
void HCallGlobal::PrintDataTo(StringStream* stream) const {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s ", *name_string);
HCall::PrintDataTo(stream);
void HCallStub::PrintDataTo(StringStream* stream) const {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
HUnaryCall::PrintDataTo(stream);
}
void HCallRuntime::PrintDataTo(StringStream* stream) const {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s ", *name_string);
HCall::PrintDataTo(stream);
}
void HCallStub::PrintDataTo(StringStream* stream) const {
stream->Add("%s(%d)",
CodeStub::MajorName(major_key_, false),
argument_count_);
void HInstanceOf::PrintDataTo(StringStream* stream) const {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
stream->Add(" ");
context()->PrintNameTo(stream);
}
@ -900,17 +925,6 @@ void HPhi::AddInput(HValue* value) {
}
bool HPhi::HasReceiverOperand() {
for (int i = 0; i < OperandCount(); i++) {
if (OperandAt(i)->IsParameter() &&
HParameter::cast(OperandAt(i))->index() == 0) {
return true;
}
}
return false;
}
HValue* HPhi::GetRedundantReplacement() const {
HValue* candidate = NULL;
int count = OperandCount();
@ -1153,6 +1167,14 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) const {
}
void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
external_pointer()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
}
void HStoreNamed::PrintDataTo(StringStream* stream) const {
object()->PrintNameTo(stream);
stream->Add(".");

441
deps/v8/src/hydrogen-instructions.h

@ -48,6 +48,7 @@ class LChunkBuilder;
#define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
V(ArithmeticBinaryOperation) \
V(BinaryCall) \
V(BinaryOperation) \
V(BitwiseBinaryOperation) \
V(Call) \
@ -58,6 +59,7 @@ class LChunkBuilder;
V(Phi) \
V(StoreKeyed) \
V(StoreNamed) \
V(UnaryCall) \
V(UnaryControlInstruction) \
V(UnaryOperation) \
HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
@ -105,6 +107,7 @@ class LChunkBuilder;
V(EnterInlined) \
V(FixedArrayLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@ -113,6 +116,7 @@ class LChunkBuilder;
V(IsNull) \
V(IsObject) \
V(IsSmi) \
V(IsConstructCall) \
V(HasInstanceType) \
V(HasCachedArrayIndex) \
V(JSArrayLength) \
@ -126,12 +130,15 @@ class LChunkBuilder;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
V(LoadPixelArrayExternalPointer) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@ -163,6 +170,7 @@ class LChunkBuilder;
V(InobjectFields) \
V(BackingStoreFields) \
V(ArrayElements) \
V(PixelArrayElements) \
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
@ -288,6 +296,7 @@ class Representation {
kTagged,
kDouble,
kInteger32,
kExternal,
kNumRepresentations
};
@ -297,6 +306,7 @@ class Representation {
static Representation Tagged() { return Representation(kTagged); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
@ -307,6 +317,7 @@ class Representation {
bool IsTagged() const { return kind_ == kTagged; }
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsDouble() const { return kind_ == kDouble; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
return kind_ == kInteger32 || kind_ == kDouble;
}
@ -601,9 +612,6 @@ class HValue: public ZoneObject {
virtual HType CalculateInferredType() const;
// Helper for type conversions used by normal and phi instructions.
void InsertInputConversion(HInstruction* previous, int index, HType type);
#ifdef DEBUG
virtual void Verify() = 0;
#endif
@ -1040,27 +1048,15 @@ class HLeaveInlined: public HInstruction {
class HPushArgument: public HUnaryOperation {
public:
explicit HPushArgument(HValue* value)
: HUnaryOperation(value), argument_index_(-1) {
set_representation(Representation::Tagged());
}
explicit HPushArgument(HValue* value) : HUnaryOperation(value) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* argument() const { return OperandAt(0); }
int argument_index() const { return argument_index_; }
void set_argument_index(int index) {
ASSERT(argument_index_ == -1 || index == argument_index_);
argument_index_ = index;
}
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
private:
int argument_index_;
};
@ -1123,36 +1119,80 @@ class HGlobalReceiver: public HUnaryOperation {
class HCall: public HInstruction {
public:
// Construct a call with uninitialized arguments. The argument count
// includes the receiver.
explicit HCall(int count);
// The argument count includes the receiver.
explicit HCall(int argument_count) : argument_count_(argument_count) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
virtual HType CalculateInferredType() const { return HType::Tagged(); }
// TODO(3190496): This needs a cleanup. We don't want the arguments
// to be operands of the call instruction. This results in bad code quality.
virtual int argument_count() const { return arguments_.length(); }
virtual int OperandCount() const { return argument_count(); }
virtual HValue* OperandAt(int index) const { return arguments_[index]; }
virtual HPushArgument* PushArgumentAt(int index) const {
return HPushArgument::cast(OperandAt(index));
virtual int argument_count() const { return argument_count_; }
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(Call)
private:
int argument_count_;
};
class HUnaryCall: public HCall {
public:
HUnaryCall(HValue* value, int argument_count)
: HCall(argument_count), value_(NULL) {
SetOperandAt(0, value);
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* value() const { return value_; }
virtual int OperandCount() const { return 1; }
virtual HValue* OperandAt(int index) const {
ASSERT(index == 0);
return value_;
}
virtual HValue* ArgumentAt(int index) const {
return PushArgumentAt(index)->argument();
DECLARE_INSTRUCTION(UnaryCall)
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
ASSERT(index == 0);
value_ = value;
}
private:
HValue* value_;
};
class HBinaryCall: public HCall {
public:
HBinaryCall(HValue* first, HValue* second, int argument_count)
: HCall(argument_count) {
SetOperandAt(0, first);
SetOperandAt(1, second);
}
virtual void SetArgumentAt(int index, HPushArgument* push_argument);
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(Call)
HValue* first() const { return operands_[0]; }
HValue* second() const { return operands_[1]; }
virtual int OperandCount() const { return 2; }
virtual HValue* OperandAt(int index) const { return operands_[index]; }
DECLARE_INSTRUCTION(BinaryCall)
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
arguments_[index] = value;
operands_[index] = value;
}
int argument_count_;
Vector<HValue*> arguments_;
private:
HOperandVector<2> operands_;
};
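
After this refactoring an HCall no longer owns its pushed arguments as operands; it stores only argument_count_, while the HUnaryCall/HBinaryCall subclasses carry the few genuine operands (context, key, constructor) in fixed slots. The building pattern, as used by VisitCall later in this commit (key and argument_count come from the surrounding builder code):

    HContext* context = new HContext;
    AddInstruction(context);
    HCallKeyed* call = new HCallKeyed(context, key, argument_count);
    call->set_position(expr->position());
    PreProcessCall(call);  // re-emits stack arguments as HPushArgument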
@ -1162,6 +1202,7 @@ class HCallConstantFunction: public HCall {
: HCall(argument_count), function_(function) { }
Handle<JSFunction> function() const { return function_; }
bool IsApplyFunction() const {
return function_->code() == Builtins::builtin(Builtins::FunctionApply);
}
@ -1175,42 +1216,32 @@ class HCallConstantFunction: public HCall {
};
class HCallKeyed: public HCall {
class HCallKeyed: public HBinaryCall {
public:
HCallKeyed(HValue* key, int argument_count)
: HCall(argument_count + 1) {
SetOperandAt(0, key);
HCallKeyed(HValue* context, HValue* key, int argument_count)
: HBinaryCall(context, key, argument_count) {
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
// TODO(3190496): This is a hack to make an additional operand that
// is not an argument work with the current setup. This _needs_ a cleanup.
// (see HCall)
virtual void PrintDataTo(StringStream* stream) const;
HValue* key() const { return OperandAt(0); }
virtual int argument_count() const { return arguments_.length() - 1; }
virtual int OperandCount() const { return arguments_.length(); }
virtual HValue* OperandAt(int index) const { return arguments_[index]; }
virtual HPushArgument* PushArgumentAt(int index) const {
return HPushArgument::cast(OperandAt(index + 1));
}
virtual void SetArgumentAt(int index, HPushArgument* push_argument) {
HCall::SetArgumentAt(index + 1, push_argument);
}
HValue* context() const { return first(); }
HValue* key() const { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
};
class HCallNamed: public HCall {
class HCallNamed: public HUnaryCall {
public:
HCallNamed(Handle<String> name, int argument_count)
: HCall(argument_count), name_(name) { }
HCallNamed(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* context() const { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
@ -1220,21 +1251,27 @@ class HCallNamed: public HCall {
};
class HCallFunction: public HCall {
class HCallFunction: public HUnaryCall {
public:
explicit HCallFunction(int argument_count) : HCall(argument_count) { }
HCallFunction(HValue* context, int argument_count)
: HUnaryCall(context, argument_count) {
}
HValue* context() const { return value(); }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
};
class HCallGlobal: public HCall {
class HCallGlobal: public HUnaryCall {
public:
HCallGlobal(Handle<String> name, int argument_count)
: HCall(argument_count), name_(name) { }
HCallGlobal(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* context() const { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
@ -1246,10 +1283,11 @@ class HCallGlobal: public HCall {
class HCallKnownGlobal: public HCall {
public:
HCallKnownGlobal(Handle<JSFunction> target,
int argument_count)
HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
: HCall(argument_count), target_(target) { }
virtual void PrintDataTo(StringStream* stream) const;
Handle<JSFunction> target() const { return target_; }
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
@ -1259,15 +1297,18 @@ class HCallKnownGlobal: public HCall {
};
class HCallNew: public HCall {
class HCallNew: public HBinaryCall {
public:
explicit HCallNew(int argument_count) : HCall(argument_count) { }
HCallNew(HValue* context, HValue* constructor, int argument_count)
: HBinaryCall(context, constructor, argument_count) {
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
HValue* constructor() const { return ArgumentAt(0); }
HValue* context() const { return first(); }
HValue* constructor() const { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
};
@ -1333,6 +1374,27 @@ class HFixedArrayLength: public HUnaryOperation {
};
class HPixelArrayLength: public HUnaryOperation {
public:
explicit HPixelArrayLength(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
// The result of this instruction is idempotent as long as its inputs don't
// change. The length of a pixel array cannot change once set, so it's not
// necessary to introduce a kDependsOnArrayLengths or any other dependency.
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HBitNot: public HUnaryOperation {
public:
explicit HBitNot(HValue* value) : HUnaryOperation(value) {
@ -1451,6 +1513,30 @@ class HLoadElements: public HUnaryOperation {
};
class HLoadPixelArrayExternalPointer: public HUnaryOperation {
public:
explicit HLoadPixelArrayExternalPointer(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::External());
// The result of this instruction is idempotent as long as its inputs don't
// change. The external array of a pixel array elements object cannot
// change once set, so it's not necessary to introduce any additional
// dependencies on top of the inputs.
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
"load-pixel-array-external-pointer")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HCheckMap: public HUnaryOperation {
public:
HCheckMap(HValue* value, Handle<Map> map)
@ -1701,7 +1787,7 @@ class HPhi: public HValue {
HValue* GetRedundantReplacement() const;
void AddInput(HValue* value);
bool HasReceiverOperand();
bool IsReceiver() { return merged_index_ == 0; }
int merged_index() const { return merged_index_; }
@ -1860,7 +1946,6 @@ class HBinaryOperation: public HInstruction {
operands_[index] = value;
}
private:
HOperandVector<2> operands_;
};
@ -2072,7 +2157,11 @@ class HCompare: public HBinaryOperation {
}
void SetInputRepresentation(Representation r);
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return input_representation_;
}
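
Several EmitAtUses() predicates in this patch (HCompare above, and HCompareJSObjectEq, HUnaryPredicate, HIsConstructCall, HInstanceOf below) gain a !HasSideEffects() guard. Emitting an instruction at its uses duplicates it once per use, which is only sound for pure computations; a toy illustration (not V8 code):

    int counter = 0;
    int Next() { return counter++; }   // observable side effect

    void Demo(int a, int b) {
      bool pure = (a == b);            // safe to re-evaluate at each use
      bool impure = (Next() == 0);     // must be evaluated exactly once
      (void)pure; (void)impure;
    }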
@ -2110,7 +2199,10 @@ class HCompareJSObjectEq: public HBinaryOperation {
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@ -2129,7 +2221,11 @@ class HUnaryPredicate: public HUnaryOperation {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@ -2179,6 +2275,24 @@ class HIsSmi: public HUnaryPredicate {
};
class HIsConstructCall: public HInstruction {
public:
HIsConstructCall() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HHasInstanceType: public HUnaryPredicate {
public:
HHasInstanceType(HValue* value, InstanceType type)
@ -2218,6 +2332,17 @@ class HHasCachedArrayIndex: public HUnaryPredicate {
};
class HGetCachedArrayIndex: public HUnaryPredicate {
public:
explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HClassOfTest: public HUnaryPredicate {
public:
HClassOfTest(HValue* value, Handle<String> class_name)
@ -2261,20 +2386,42 @@ class HTypeofIs: public HUnaryPredicate {
};
class HInstanceOf: public HBinaryOperation {
class HInstanceOf: public HInstruction {
public:
HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
HInstanceOf(HValue* context, HValue* left, HValue* right) {
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
HValue* context() const { return operands_[0]; }
HValue* left() const { return operands_[1]; }
HValue* right() const { return operands_[2]; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream) const;
virtual int OperandCount() const { return 3; }
virtual HValue* OperandAt(int index) const { return operands_[index]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
operands_[index] = value;
}
private:
HOperandVector<3> operands_;
};
@ -2543,18 +2690,17 @@ class HParameter: public HInstruction {
};
class HCallStub: public HInstruction {
class HCallStub: public HUnaryCall {
public:
HCallStub(CodeStub::Major major_key, int argument_count)
: major_key_(major_key),
argument_count_(argument_count),
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
major_key_(major_key),
transcendental_type_(TranscendentalCache::kNumberOfCaches) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
CodeStub::Major major_key() { return major_key_; }
int argument_count() { return argument_count_; }
HValue* context() const { return value(); }
void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
transcendental_type_ = transcendental_type;
@ -2562,13 +2708,13 @@ class HCallStub: public HInstruction {
TranscendentalCache::Type transcendental_type() {
return transcendental_type_;
}
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
private:
CodeStub::Major major_key_;
int argument_count_;
TranscendentalCache::Type transcendental_type_;
};
@ -2746,15 +2892,16 @@ class HLoadNamedField: public HUnaryOperation {
};
class HLoadNamedGeneric: public HUnaryOperation {
class HLoadNamedGeneric: public HBinaryOperation {
public:
HLoadNamedGeneric(HValue* object, Handle<Object> name)
: HUnaryOperation(object), name_(name) {
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
: HBinaryOperation(context, object), name_(name) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
HValue* object() const { return OperandAt(0); }
HValue* context() const { return OperandAt(0); }
HValue* object() const { return OperandAt(1); }
Handle<Object> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
@ -2829,19 +2976,67 @@ class HLoadKeyedFastElement: public HLoadKeyed {
};
class HLoadPixelArrayElement: public HBinaryOperation {
public:
HLoadPixelArrayElement(HValue* external_elements, HValue* key)
: HBinaryOperation(external_elements, key) {
set_representation(Representation::Integer32());
SetFlag(kDependsOnPixelArrayElements);
// Native code could change the pixel array.
SetFlag(kDependsOnCalls);
SetFlag(kUseGVN);
}
virtual void PrintDataTo(StringStream* stream) const;
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32, but the base pointer
// for the element load is a naked pointer.
return (index == 1) ? Representation::Integer32()
: Representation::External();
}
HValue* external_pointer() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load_pixel_array_element")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HLoadKeyedGeneric: public HLoadKeyed {
public:
HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key)
: HLoadKeyed(obj, key), context_(NULL) {
SetOperandAt(2, context);
SetAllSideEffects();
}
HValue* context() const { return context_; }
HValue* object() const { return operands_[0]; }
HValue* key() const { return operands_[1]; }
virtual int OperandCount() const { return 3; }
virtual HValue* OperandAt(int index) const {
return (index < 2) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
protected:
virtual void InternalSetOperandAt(int index, HValue* value);
private:
HValue* context_;
};
class HStoreNamed: public HBinaryOperation {
public:
HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
HStoreNamed(HValue* obj, Handle<String> name, HValue* val)
: HBinaryOperation(obj, val), name_(name) {
}
@ -2852,21 +3047,21 @@ class HStoreNamed: public HBinaryOperation {
virtual void PrintDataTo(StringStream* stream) const;
HValue* object() const { return OperandAt(0); }
Handle<Object> name() const { return name_; }
Handle<String> name() const { return name_; }
HValue* value() const { return OperandAt(1); }
void set_value(HValue* value) { SetOperandAt(1, value); }
DECLARE_INSTRUCTION(StoreNamed)
private:
Handle<Object> name_;
Handle<String> name_;
};
class HStoreNamedField: public HStoreNamed {
public:
HStoreNamedField(HValue* obj,
Handle<Object> name,
Handle<String> name,
HValue* val,
bool in_object,
int offset)
@ -2905,12 +3100,32 @@ class HStoreNamedField: public HStoreNamed {
class HStoreNamedGeneric: public HStoreNamed {
public:
HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
: HStoreNamed(obj, name, val) {
HStoreNamedGeneric(HValue* context,
HValue* object,
Handle<String> name,
HValue* value)
: HStoreNamed(object, name, value), context_(NULL) {
SetOperandAt(2, context);
SetAllSideEffects();
}
HValue* context() const { return context_; }
HValue* object() const { return operands_[0]; }
HValue* value() const { return operands_[1]; }
virtual int OperandCount() const { return 3; }
virtual HValue* OperandAt(int index) const {
return (index < 2) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
protected:
virtual void InternalSetOperandAt(int index, HValue* value);
private:
HValue* context_;
};
@ -2945,7 +3160,6 @@ class HStoreKeyed: public HInstruction {
operands_[index] = value;
}
private:
HOperandVector<3> operands_;
};
@ -2970,12 +3184,33 @@ class HStoreKeyedFastElement: public HStoreKeyed {
class HStoreKeyedGeneric: public HStoreKeyed {
public:
HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
: HStoreKeyed(obj, key, val) {
HStoreKeyedGeneric(HValue* context,
HValue* object,
HValue* key,
HValue* value)
: HStoreKeyed(object, key, value), context_(NULL) {
SetOperandAt(3, context);
SetAllSideEffects();
}
HValue* context() const { return context_; }
HValue* object() const { return operands_[0]; }
HValue* key() const { return operands_[1]; }
HValue* value() const { return operands_[2]; }
virtual int OperandCount() const { return 4; }
virtual HValue* OperandAt(int index) const {
return (index < 3) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
protected:
virtual void InternalSetOperandAt(int index, HValue* value);
private:
HValue* context_;
};
@ -3077,22 +3312,36 @@ class HArrayLiteral: public HMaterializedLiteral {
class HObjectLiteral: public HMaterializedLiteral {
public:
HObjectLiteral(Handle<FixedArray> constant_properties,
HObjectLiteral(HValue* context,
Handle<FixedArray> constant_properties,
bool fast_elements,
int literal_index,
int depth)
: HMaterializedLiteral(literal_index, depth),
context_(NULL),
constant_properties_(constant_properties),
fast_elements_(fast_elements) {}
fast_elements_(fast_elements) {
SetOperandAt(0, context);
}
HValue* context() const { return context_; }
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
bool fast_elements() const { return fast_elements_; }
virtual int OperandCount() const { return 1; }
virtual HValue* OperandAt(int index) const { return context_; }
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
context_ = value;
}
private:
HValue* context_;
Handle<FixedArray> constant_properties_;
bool fast_elements_;
};

462
deps/v8/src/hydrogen.cc

@ -65,6 +65,7 @@ HBasicBlock::HBasicBlock(HGraph* graph)
first_instruction_index_(-1),
last_instruction_index_(-1),
deleted_phis_(4),
parent_loop_header_(NULL),
is_inline_return_target_(false) {
}
@ -105,18 +106,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
if (first_ == NULL) {
HBlockEntry* entry = new HBlockEntry();
entry->InitializeAsFirst(this);
first_ = entry;
first_ = last_ = entry;
}
instr->InsertAfter(GetLastInstruction());
}
HInstruction* HBasicBlock::GetLastInstruction() {
if (end_ != NULL) return end_->previous();
if (first_ == NULL) return NULL;
if (last_ == NULL) last_ = first_;
while (last_->next() != NULL) last_ = last_->next();
return last_;
instr->InsertAfter(last_);
last_ = instr;
}
@ -177,7 +170,7 @@ void HBasicBlock::SetJoinId(int id) {
for (int i = 0; i < length; i++) {
HBasicBlock* predecessor = predecessors_[i];
ASSERT(predecessor->end()->IsGoto());
HSimulate* simulate = HSimulate::cast(predecessor->GetLastInstruction());
HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
// We only need to verify the ID once.
ASSERT(i != 0 ||
predecessor->last_environment()->closure()->shared()
@ -293,20 +286,6 @@ void HBasicBlock::Verify() {
// Check that every block is finished.
ASSERT(IsFinished());
ASSERT(block_id() >= 0);
// Verify that all blocks targeting a branch target have the same boolean
// value on top of their expression stack.
if (!cond().is_null()) {
ASSERT(predecessors()->length() > 0);
for (int i = 1; i < predecessors()->length(); i++) {
HBasicBlock* pred = predecessors()->at(i);
HValue* top = pred->last_environment()->Top();
ASSERT(top->IsConstant());
Object* a = *HConstant::cast(top)->handle();
Object* b = *cond();
ASSERT(a == b);
}
}
}
#endif
@ -870,13 +849,11 @@ void HGraph::EliminateRedundantPhis() {
}
uses_to_replace.Rewind(0);
block->RemovePhi(phi);
} else if (phi->HasNoUses() &&
!phi->HasReceiverOperand() &&
FLAG_eliminate_dead_phis) {
// We can't eliminate phis that have the receiver as an operand
// because in case of throwing an error we need the correct
// receiver value in the environment to construct a correct
// stack trace.
} else if (FLAG_eliminate_dead_phis && phi->HasNoUses() &&
!phi->IsReceiver()) {
// We can't eliminate phis in the receiver position in the environment
// because in case of throwing an error we need this value to
// construct a stack trace.
block->RemovePhi(phi);
block->RecordDeletedPhi(phi->merged_index());
}
@ -1815,17 +1792,15 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
bool is_truncating) {
// Insert the representation change right before its use. For phi-uses we
// insert at the end of the corresponding predecessor.
HBasicBlock* insert_block = use->block();
HInstruction* next = NULL;
if (use->IsPhi()) {
int index = 0;
while (use->OperandAt(index) != value) ++index;
insert_block = insert_block->predecessors()->at(index);
next = use->block()->predecessors()->at(index)->end();
} else {
next = HInstruction::cast(use);
}
HInstruction* next = (insert_block == use->block())
? HInstruction::cast(use)
: insert_block->end();
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
@ -2197,10 +2172,8 @@ void HGraphBuilder::VisitForControl(Expression* expr,
}
HValue* HGraphBuilder::VisitArgument(Expression* expr) {
void HGraphBuilder::VisitArgument(Expression* expr) {
VisitForValue(expr);
if (HasStackOverflow() || !subgraph()->HasExit()) return NULL;
return environment()->Top();
}
@ -2319,29 +2292,15 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) {
}
void HGraphBuilder::PushArgumentsForStubCall(int argument_count) {
const int kMaxStubArguments = 4;
ASSERT_GE(kMaxStubArguments, argument_count);
// Push the arguments on the stack.
HValue* arguments[kMaxStubArguments];
for (int i = argument_count - 1; i >= 0; i--) {
arguments[i] = Pop();
}
for (int i = 0; i < argument_count; i++) {
AddInstruction(new HPushArgument(arguments[i]));
}
}
void HGraphBuilder::ProcessCall(HCall* call) {
for (int i = call->argument_count() - 1; i >= 0; --i) {
HValue* value = Pop();
HPushArgument* push = new HPushArgument(value);
call->SetArgumentAt(i, push);
void HGraphBuilder::PreProcessCall(HCall* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count);
for (int i = 0; i < count; ++i) {
arguments.Add(Pop());
}
for (int i = 0; i < call->argument_count(); ++i) {
AddInstruction(call->PushArgumentAt(i));
while (!arguments.is_empty()) {
AddInstruction(new HPushArgument(arguments.RemoveLast()));
}
}
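
PreProcessCall pops the arguments off the expression stack (top first) into a temporary list and then drains that list from the back, so the two reversals cancel and HPushArgument instructions come out in source order. A self-contained demonstration:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<char> stack;          // expression stack for f(a, b, c)
      stack.push_back('a');
      stack.push_back('b');
      stack.push_back('c');             // 'c' on top
      std::vector<char> arguments;
      while (!stack.empty()) {          // Pop(): top of stack first
        arguments.push_back(stack.back());
        stack.pop_back();               // arguments ends up as c, b, a
      }
      while (!arguments.empty()) {      // RemoveLast(): back first
        std::printf("push %c\n", arguments.back());  // prints a, b, c
        arguments.pop_back();
      }
      return 0;
    }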
@ -2952,6 +2911,9 @@ void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
if (is_store && lookup->IsReadOnly()) {
BAILOUT("read-only global variable");
}
if (lookup->holder() != *global) {
BAILOUT("global property on prototype of global object");
}
}
@ -3021,7 +2983,10 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HObjectLiteral* literal = (new HObjectLiteral(expr->constant_properties(),
HContext* context = new HContext;
AddInstruction(context);
HObjectLiteral* literal = (new HObjectLiteral(context,
expr->constant_properties(),
expr->fast_elements(),
expr->literal_index(),
expr->depth()));
@ -3048,7 +3013,9 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
VISIT_FOR_VALUE(value);
HValue* value = Pop();
Handle<String> name = Handle<String>::cast(key->handle());
AddInstruction(new HStoreNamedGeneric(literal, name, value));
HStoreNamedGeneric* store =
new HStoreNamedGeneric(context, literal, name, value);
AddInstruction(store);
AddSimulate(key->id());
} else {
VISIT_FOR_EFFECT(value);
@ -3111,53 +3078,47 @@ void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
}
HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
ZoneList<HSubgraph*>* subgraphs,
HValue* receiver,
HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
ZoneMapList* maps,
ZoneList<HSubgraph*>* body_graphs,
HSubgraph* default_graph,
int join_id) {
ASSERT(subgraphs->length() == (maps->length() + 1));
// Build map compare subgraphs for all but the first map.
ZoneList<HSubgraph*> map_compare_subgraphs(maps->length() - 1);
for (int i = maps->length() - 1; i > 0; --i) {
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* else_subgraph =
(i == (maps->length() - 1))
? subgraphs->last()
: map_compare_subgraphs.last();
HCompareMap* compare = new HCompareMap(receiver,
maps->at(i),
subgraphs->at(i)->entry_block(),
else_subgraph->entry_block());
current_subgraph_->exit_block()->Finish(compare);
map_compare_subgraphs.Add(subgraph);
}
// Generate first map check to end the current block.
ASSERT(maps->length() == body_graphs->length());
HBasicBlock* join_block = graph()->CreateBasicBlock();
AddInstruction(new HCheckNonSmi(receiver));
HSubgraph* else_subgraph =
(maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
HCompareMap* compare = new HCompareMap(receiver,
Handle<Map>(maps->first()),
subgraphs->first()->entry_block(),
else_subgraph->entry_block());
current_subgraph_->exit_block()->Finish(compare);
// Join all the call subgraphs in a new basic block and make
// this basic block the current basic block.
HBasicBlock* join_block = graph_->CreateBasicBlock();
for (int i = 0; i < subgraphs->length(); ++i) {
HSubgraph* subgraph = subgraphs->at(i);
if (subgraph->HasExit()) {
for (int i = 0; i < maps->length(); ++i) {
// Build the branches, connect all the target subgraphs to the join
// block. Use the default as a target of the last branch.
HSubgraph* if_true = body_graphs->at(i);
HSubgraph* if_false = (i == maps->length() - 1)
? default_graph
: CreateBranchSubgraph(environment());
HCompareMap* compare =
new HCompareMap(receiver,
maps->at(i),
if_true->entry_block(),
if_false->entry_block());
subgraph()->exit_block()->Finish(compare);
if (if_true->HasExit()) {
// In an effect context the value of the type switch is not needed.
// There is no need to merge it at the join block only to discard it.
HBasicBlock* subgraph_exit = subgraph->exit_block();
if (ast_context()->IsEffect()) {
subgraph_exit->last_environment()->Drop(1);
if_true->exit_block()->last_environment()->Drop(1);
}
subgraph_exit->Goto(join_block);
if_true->exit_block()->Goto(join_block);
}
subgraph()->set_exit_block(if_false->exit_block());
}
// Connect the default if necessary.
if (subgraph()->HasExit()) {
if (ast_context()->IsEffect()) {
environment()->Drop(1);
}
subgraph()->exit_block()->Goto(join_block);
}
if (join_block->predecessors()->is_empty()) return NULL;
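
The new BuildTypeSwitch threads a chain of HCompareMap branches through the receiver, one per monomorphic map, with the default graph (HDeoptimize or the generic IC path) as the final else, and every live arm jumping to a fresh join block. The shape of the emitted control flow, roughly:

    //           receiver
    //              |
    //       HCompareMap(map0) --true--> body0 ------\
    //              | false                           \
    //       HCompareMap(map1) --true--> body1 --------> join block
    //              | false                           /
    //        default graph --------------------------/
    //        (HDeoptimize, or a generic load/store/call)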
@ -3228,7 +3189,9 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value) {
return new HStoreNamedGeneric(object, name, value);
HContext* context = new HContext;
AddInstruction(context);
return new HStoreNamedGeneric(context, object, name, value);
}
@ -3261,7 +3224,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Handle<String> name) {
int number_of_types = Min(types->length(), kMaxStorePolymorphism);
ZoneMapList maps(number_of_types);
ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxStorePolymorphism);
// Build subgraphs for each of the specific maps.
@ -3273,7 +3236,6 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Handle<Map> map = types->at(i);
LookupResult lookup;
if (ComputeStoredField(map, name, &lookup)) {
maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HInstruction* instr =
@ -3281,6 +3243,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@ -3290,7 +3253,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// If none of the properties were named fields we generate a
// generic store.
if (maps.length() == 0) {
HInstruction* instr = new HStoreNamedGeneric(object, name, value);
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
@ -3298,22 +3261,20 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
ast_context()->ReturnValue(Pop());
} else {
// Build subgraph for generic store through IC.
{
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
default_graph->FinishExit(new HDeoptimize());
} else {
HInstruction* instr = new HStoreNamedGeneric(object, name, value);
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
}
subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here.
@ -3354,7 +3315,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
return;
} else {
instr = new HStoreNamedGeneric(object, name, value);
instr = BuildStoreNamedGeneric(object, name, value);
}
} else {
@ -3414,10 +3375,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
BinaryOperation* operation = expr->binary_operation();
if (var != NULL) {
if (!var->is_global() && !var->IsStackAllocated()) {
BAILOUT("non-stack/non-global in compound assignment");
}
VISIT_FOR_VALUE(operation);
if (var->is_global()) {
@ -3425,8 +3382,16 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Top(),
expr->position(),
expr->AssignmentId());
} else {
} else if (var->IsStackAllocated()) {
Bind(var, Top());
} else if (var->IsContextSlot()) {
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
} else {
BAILOUT("compound assignment to lookup slot");
}
ast_context()->ReturnValue(Pop());
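
Compound assignments (and, further down, count operations) can now target context-allocated variables: the builder walks the lexical context chain and stores into the slot. As a loose analogy only (V8 contexts are heap objects; the struct and names here are invented for illustration):

    struct Object;
    struct Context {
      Context* outer;
      Object** slots;
    };

    // Walk 'depth' enclosing contexts, then write the slot: roughly what
    // BuildContextChainWalk plus HStoreContextSlot amount to.
    void StoreContextSlot(Context* current, int depth, int index,
                          Object* value) {
      Context* ctx = current;
      for (int i = 0; i < depth; ++i) ctx = ctx->outer;
      ctx->slots[index] = value;
    }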
@ -3474,7 +3439,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
bool is_fast_elements = prop->IsMonomorphic() &&
prop->GetMonomorphicReceiverType()->has_fast_elements();
HInstruction* load = is_fast_elements
? BuildLoadKeyedFastElement(obj, key, prop)
: BuildLoadKeyedGeneric(obj, key);
@ -3589,7 +3553,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
Handle<String> name) {
int number_of_types = Min(types->length(), kMaxLoadPolymorphism);
ZoneMapList maps(number_of_types);
ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxLoadPolymorphism);
// Build subgraphs for each of the specific maps.
@ -3602,7 +3566,6 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
LookupResult lookup;
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty() && lookup.type() == FIELD) {
maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HLoadNamedField* instr =
@ -3610,6 +3573,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
instr->set_position(expr->position());
instr->ClearFlag(HValue::kUseGVN); // Don't do GVN on polymorphic loads.
PushAndAdd(instr);
maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@ -3624,21 +3588,19 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
ast_context()->ReturnInstruction(instr, expr->id());
} else {
// Build subgraph for generic load through IC.
{
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
default_graph->FinishExit(new HDeoptimize());
} else {
HInstruction* instr = BuildLoadNamedGeneric(object, expr);
instr->set_position(expr->position());
PushAndAdd(instr);
}
subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here.
@ -3677,7 +3639,9 @@ HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
Property* expr) {
ASSERT(expr->key()->IsPropertyName());
Handle<Object> name = expr->key()->AsLiteral()->handle();
return new HLoadNamedGeneric(obj, name);
HContext* context = new HContext;
AddInstruction(context);
return new HLoadNamedGeneric(context, obj, name);
}
@ -3706,7 +3670,9 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
return new HLoadKeyedGeneric(object, key);
HContext* context = new HContext;
AddInstruction(context);
return new HLoadKeyedGeneric(context, object, key);
}
@ -3734,10 +3700,34 @@ HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
}
HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object,
HValue* key,
Property* expr) {
ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
AddInstruction(new HCheckNonSmi(object));
Handle<Map> map = expr->GetMonomorphicReceiverType();
ASSERT(!map->has_fast_elements());
ASSERT(map->has_pixel_array_elements());
AddInstruction(new HCheckMap(object, map));
HLoadElements* elements = new HLoadElements(object);
AddInstruction(elements);
HInstruction* length = AddInstruction(new HPixelArrayLength(elements));
AddInstruction(new HBoundsCheck(key, length));
HLoadPixelArrayExternalPointer* external_elements =
new HLoadPixelArrayExternalPointer(elements);
AddInstruction(external_elements);
HLoadPixelArrayElement* pixel_array_value =
new HLoadPixelArrayElement(external_elements, key);
return pixel_array_value;
}
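
BuildLoadKeyedPixelArrayElement emits, in order: a non-smi check, a map check, an elements load, a pixel-array length load, a bounds check, an external-pointer load, and the element load itself. A rough C++ model of the value the optimized code computes (illustrative; a failed check deoptimizes rather than asserting):

    #include <cassert>
    #include <stdint.h>

    uint8_t LoadPixelElement(const uint8_t* external_pointer,
                             int32_t key,
                             int32_t length) {
      assert(key >= 0 && key < length);  // stands in for HBoundsCheck
      return external_pointer[key];      // raw byte load from the pixel array
    }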
HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
HValue* key,
HValue* value) {
return new HStoreKeyedGeneric(object, key, value);
HContext* context = new HContext;
AddInstruction(context);
return new HStoreKeyedGeneric(context, object, key, value);
}
@ -3841,12 +3831,20 @@ void HGraphBuilder::VisitProperty(Property* expr) {
HValue* key = Pop();
HValue* obj = Pop();
bool is_fast_elements = expr->IsMonomorphic() &&
expr->GetMonomorphicReceiverType()->has_fast_elements();
instr = is_fast_elements
? BuildLoadKeyedFastElement(obj, key, expr)
: BuildLoadKeyedGeneric(obj, key);
if (expr->IsMonomorphic()) {
Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
// An object has either fast elements or pixel array elements, but never
// both. Pixel array maps that are assigned to pixel array elements are
// always created with the fast elements flag cleared.
if (receiver_type->has_pixel_array_elements()) {
instr = BuildLoadKeyedPixelArrayElement(obj, key, expr);
} else if (receiver_type->has_fast_elements()) {
instr = BuildLoadKeyedFastElement(obj, key, expr);
}
}
if (instr == NULL) {
instr = BuildLoadKeyedGeneric(obj, key);
}
}
instr->set_position(expr->position());
ast_context()->ReturnInstruction(instr, expr->id());
@ -3879,7 +3877,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
int number_of_types = Min(types->length(), kMaxCallPolymorphism);
ZoneMapList maps(number_of_types);
ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxCallPolymorphism);
// Build subgraphs for each of the specific maps.
@ -3890,7 +3888,6 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
for (int i = 0; i < number_of_types; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
AddCheckConstantFunction(expr, receiver, map, false);
@ -3904,9 +3901,10 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
CHECK_BAILOUT;
HCall* call = new HCallConstantFunction(expr->target(), argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
PushAndAdd(call);
}
maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@ -3916,28 +3914,30 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// If we couldn't compute the target for any of the maps just perform an
// IC call.
if (maps.length() == 0) {
HCall* call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCall* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
} else {
// Build subgraph for generic call through IC.
{
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
default_graph->FinishExit(new HDeoptimize());
} else {
HCall* call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCall* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
PushAndAdd(call);
}
subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here.
@ -4004,6 +4004,9 @@ bool HGraphBuilder::TryInline(Call* expr) {
CompilationInfo inner_info(target);
if (!ParserApi::Parse(&inner_info) ||
!Scope::Analyze(&inner_info)) {
if (Top::has_pending_exception()) {
SetStackOverflow();
}
return false;
}
FunctionLiteral* function = inner_info.function();
@ -4348,9 +4351,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
call = new HCallKeyed(key, argument_count);
HContext* context = new HContext;
AddInstruction(context);
call = new HCallKeyed(context, key, argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
Drop(1); // Key.
ast_context()->ReturnInstruction(call, expr->id());
return;
@ -4362,7 +4367,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (TryCallApply(expr)) return;
CHECK_BAILOUT;
HValue* receiver = VisitArgument(prop->obj());
VisitArgument(prop->obj());
CHECK_BAILOUT;
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
@ -4372,6 +4377,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
HValue* receiver =
environment()->ExpressionStackAt(expr->arguments()->length());
if (expr->IsMonomorphic()) {
Handle<Map> receiver_map =
(types == NULL) ? Handle<Map>::null() : types->first();
@ -4387,7 +4394,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the
// IC when a primitive receiver check is required.
call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
call = new HCallNamed(context, name, argument_count);
} else {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
@ -4416,7 +4425,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
} else {
call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
call = new HCallNamed(context, name, argument_count);
}
} else {
@ -4486,7 +4497,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
call = new HCallGlobal(var->name(), argument_count);
call = new HCallGlobal(context, var->name(), argument_count);
}
} else {
@ -4498,12 +4509,12 @@ void HGraphBuilder::VisitCall(Call* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
call = new HCallFunction(argument_count);
call = new HCallFunction(context, argument_count);
}
}
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
@ -4516,10 +4527,16 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HCall* call = new HCallNew(argument_count);
HContext* context = new HContext;
AddInstruction(context);
// The constructor is both an operand to the instruction and an argument
// to the construct call.
int arg_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
HCall* call = new HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
@ -4573,7 +4590,7 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
HCall* call = new HCallRuntime(name, expr->function(), argument_count);
call->set_position(RelocInfo::kNoPosition);
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
}
@ -4600,12 +4617,18 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
// The subexpression does not have side effects.
ast_context()->ReturnValue(graph()->GetConstantFalse());
} else if (prop != NULL) {
VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
HValue* key = Pop();
HValue* obj = Pop();
ast_context()->ReturnInstruction(new HDeleteProperty(obj, key),
expr->id());
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
ast_context()->ReturnValue(graph()->GetConstantFalse());
} else {
VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
HValue* key = Pop();
HValue* obj = Pop();
HDeleteProperty* instr = new HDeleteProperty(obj, key);
ast_context()->ReturnInstruction(instr, expr->id());
}
} else if (var->is_global()) {
BAILOUT("delete with global variable");
} else {
@ -4685,10 +4708,6 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
bool inc = expr->op() == Token::INC;
if (var != NULL) {
if (!var->is_global() && !var->IsStackAllocated()) {
BAILOUT("non-stack/non-global variable in count operation");
}
VISIT_FOR_VALUE(target);
// Match the full code generator stack by simulating an extra stack
@ -4704,9 +4723,16 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
after,
expr->position(),
expr->AssignmentId());
} else {
ASSERT(var->IsStackAllocated());
} else if (var->IsStackAllocated()) {
Bind(var, after);
} else if (var->IsContextSlot()) {
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
HStoreContextSlot* instr = new HStoreContextSlot(context, index, after);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
} else {
BAILOUT("lookup variable in count operation");
}
Drop(has_extra ? 2 : 1);
ast_context()->ReturnValue(expr->is_postfix() ? before : after);
@ -4785,7 +4811,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
HInstruction* store = is_fast_elements
? BuildStoreKeyedFastElement(obj, key, after, prop)
: new HStoreKeyedGeneric(obj, key, after);
: BuildStoreKeyedGeneric(obj, key, after);
AddInstruction(store);
// Drop the key from the bailout environment. Overwrite the receiver
@ -5042,7 +5068,9 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
instr = new HInstanceOf(left, right);
HContext* context = new HContext;
AddInstruction(context);
instr = new HInstanceOf(context, left, right);
} else {
AddInstruction(new HCheckFunction(right, target));
instr = new HInstanceOfKnownGlobal(left, target);
@ -5185,9 +5213,10 @@ void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
}
// Support for construct call checks.

void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: IsConstructCall");
ASSERT(argument_count == 0);
ast_context()->ReturnInstruction(new HIsConstructCall, ast_id);
}
@ -5251,8 +5280,11 @@ void HGraphBuilder::GenerateStringCharFromCode(int argument_count,
// Fast support for string.charAt(n) and string[n].
void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::StringCharAt, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::StringCharAt, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
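This hunk establishes the pattern that the following stub-call hunks (StringAdd, SubString, StringCompare, RegExpExec, RegExpConstructResult, NumberToString, and the transcendental-cache stubs) all repeat: the context becomes an explicit HContext operand of the HCallStub instead of being implied by PushArgumentsForStubCall. A sketch of the shared shape, using a hypothetical helper name that is not part of the commit:

// Hypothetical helper capturing the repeated pattern below.
HInstruction* BuildStubCall(HGraphBuilder* builder,
                            CodeStub::Major key,
                            int argc) {
  HContext* context = new HContext;        // materialize the context explicitly
  builder->AddInstruction(context);
  HCallStub* result = new HCallStub(context, key, argc);
  builder->PreProcessCall(result);         // pop args, emit push-argument instrs
  return result;
}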
@ -5281,8 +5313,11 @@ void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) {
// Fast support for StringAdd.
void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::StringAdd, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::StringAdd, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5290,8 +5325,11 @@ void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
// Fast support for SubString.
void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
ASSERT_EQ(3, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::SubString, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::SubString, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5299,8 +5337,11 @@ void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
// Fast support for StringCompare.
void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::StringCompare, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::StringCompare, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5308,8 +5349,11 @@ void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
// Support for direct calls from JavaScript to native RegExp code.
void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
ASSERT_EQ(4, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::RegExpExec, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::RegExpExec, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5318,9 +5362,11 @@ void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
void HGraphBuilder::GenerateRegExpConstructResult(int argument_count,
int ast_id) {
ASSERT_EQ(3, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::RegExpConstructResult, argument_count);
new HCallStub(context, CodeStub::RegExpConstructResult, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5334,8 +5380,11 @@ void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) {
// Fast support for number to string.
void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::NumberToString, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::NumberToString, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5366,30 +5415,36 @@ void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) {
void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::SIN);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::COS);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::LOG);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5408,7 +5463,10 @@ void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count,
void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count,
int ast_id) {
BAILOUT("inlined runtime function: GetCachedArrayIndex");
ASSERT(argument_count == 1);
HValue* value = Pop();
HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
ast_context()->ReturnInstruction(result, ast_id);
}
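GetCachedArrayIndex now compiles to an HGetCachedArrayIndex instruction instead of bailing out. For background (a sketch under the assumption that the layout matches the string hash field): when a string is known to be a valid array index, the index is cached in the upper bits of its hash field and can be extracted with a shift and a mask, with no runtime call.

#include <cstdint>

// Illustrative only; the real shift/mask constants live in V8's String class.
uint32_t CachedArrayIndex(uint32_t hash_field,
                          uint32_t index_shift,
                          uint32_t index_mask) {
  return (hash_field >> index_shift) & index_mask;
}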

38
deps/v8/src/hydrogen.h

@ -60,6 +60,8 @@ class HBasicBlock: public ZoneObject {
HGraph* graph() const { return graph_; }
const ZoneList<HPhi*>* phis() const { return &phis_; }
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
@ -103,16 +105,14 @@ class HBasicBlock: public ZoneObject {
void ClearEnvironment() { last_environment_ = NULL; }
bool HasEnvironment() const { return last_environment_ != NULL; }
void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
HBasicBlock* parent_loop_header() const {
if (!HasParentLoopHeader()) return NULL;
return parent_loop_header_.get();
}
HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
void set_parent_loop_header(HBasicBlock* block) {
parent_loop_header_.set(block);
ASSERT(parent_loop_header_ == NULL);
parent_loop_header_ = block;
}
bool HasParentLoopHeader() const { return parent_loop_header_.is_set(); }
bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
void SetJoinId(int id);
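The SetOncePointer wrapper is dropped here in favor of a raw pointer, with the write-once invariant kept by the ASSERT in the setter. A minimal sketch of the resulting idiom:

#include <cassert>

class Block {
 public:
  Block* parent_loop_header() const { return parent_loop_header_; }
  void set_parent_loop_header(Block* block) {
    assert(parent_loop_header_ == nullptr);   // may only be assigned once
    parent_loop_header_ = block;
  }
  bool HasParentLoopHeader() const { return parent_loop_header_ != nullptr; }

 private:
  Block* parent_loop_header_ = nullptr;
};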
@ -136,9 +136,6 @@ class HBasicBlock: public ZoneObject {
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
Handle<Object> cond() { return cond_; }
void set_cond(Handle<Object> value) { cond_ = value; }
#ifdef DEBUG
void Verify();
#endif
@ -153,7 +150,7 @@ class HBasicBlock: public ZoneObject {
HGraph* graph_;
ZoneList<HPhi*> phis_;
HInstruction* first_;
HInstruction* last_; // Last non-control instruction of the block.
HInstruction* last_;
HControlInstruction* end_;
HLoopInformation* loop_information_;
ZoneList<HBasicBlock*> predecessors_;
@ -166,9 +163,8 @@ class HBasicBlock: public ZoneObject {
int first_instruction_index_;
int last_instruction_index_;
ZoneList<int> deleted_phis_;
SetOncePointer<HBasicBlock> parent_loop_header_;
HBasicBlock* parent_loop_header_;
bool is_inline_return_target_;
Handle<Object> cond_;
};
@ -706,19 +702,17 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
// Visit an argument and wrap it in a PushArgument instruction.
HValue* VisitArgument(Expression* expr);
// Visit an argument subexpression.
void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
void PushArgumentsForStubCall(int argument_count);
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
void ProcessCall(HCall* call);
void PreProcessCall(HCall* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@ -791,6 +785,9 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildLoadKeyedFastElement(HValue* object,
HValue* key,
Property* expr);
HInstruction* BuildLoadKeyedPixelArrayElement(HValue* object,
HValue* key,
Property* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
HValue* key);
@ -831,9 +828,10 @@ class HGraphBuilder: public AstVisitor {
bool smi_and_map_check);
HBasicBlock* BuildTypeSwitch(ZoneMapList* maps,
ZoneList<HSubgraph*>* subgraphs,
HValue* receiver,
HBasicBlock* BuildTypeSwitch(HValue* receiver,
ZoneMapList* maps,
ZoneList<HSubgraph*>* body_graphs,
HSubgraph* default_graph,
int join_id);
TypeFeedbackOracle* oracle_;

17
deps/v8/src/ia32/assembler-ia32.cc

@ -2559,6 +2559,19 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
}
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x22);
emit_sse_operand(dst, src);
EMIT(offset);
}
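The byte sequence emitted above is the SSE4.1 encoding of pinsrd xmm, r/m32, imm8 (66 0F 3A 22 /r ib), the insert counterpart of the existing pextrd. For reference (a sketch; the ModRM byte and immediate follow the prefix):

#include <cstdint>

// Opcode prefix for pinsrd xmm, r/m32, imm8; emit_sse_operand supplies ModRM.
const uint8_t kPinsrdOpcode[] = { 0x66, 0x0F, 0x3A, 0x22 };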
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
@ -2594,8 +2607,8 @@ void Assembler::RecordDebugBreakSlot() {
}
void Assembler::RecordComment(const char* msg) {
if (FLAG_code_comments) {
void Assembler::RecordComment(const char* msg, bool force) {
if (FLAG_code_comments || force) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}

53
deps/v8/src/ia32/assembler-ia32.h

@ -30,7 +30,7 @@
// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
@ -64,30 +64,14 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
static const int kNumAllocatableRegisters = 5;
static const int kNumAllocatableRegisters = 6;
static const int kNumRegisters = 8;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < 4 || reg.code() == 7);
return (reg.code() == 7) ? 4 : reg.code();
}
static inline const char* AllocationIndexToString(int index);
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return (index == 4) ? from_code(7) : from_code(index);
}
static inline int ToAllocationIndex(Register reg);
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"eax",
"ecx",
"edx",
"ebx",
"edi"
};
return names[index];
}
static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
Register r = { code };
@ -110,6 +94,7 @@ struct Register {
int code_;
};
const Register eax = { 0 };
const Register ecx = { 1 };
const Register edx = { 2 };
@ -121,6 +106,26 @@ const Register edi = { 7 };
const Register no_reg = { -1 };
inline const char* Register::AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
}
inline int Register::ToAllocationIndex(Register reg) {
ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
}
inline Register Register::FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
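With esi now allocatable, the six allocatable registers are eax, ecx, edx, ebx, esi, and edi; esp (code 4) and ebp (code 5) are skipped, so register codes 6 and 7 fold down to allocation indices 4 and 5. A standalone sketch of the mapping:

// Mirrors the functions above: codes 0-3 map directly, 6 and 7 shift down by 2.
int ToAllocationIndex(int code) { return code >= 6 ? code - 2 : code; }
int FromAllocationIndex(int index) { return index >= 4 ? index + 2 : index; }
// eax:0  ecx:1  edx:2  ebx:3  esi:4  edi:5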
struct XMMRegister {
static const int kNumAllocatableRegisters = 7;
static const int kNumRegisters = 8;
@ -928,6 +933,7 @@ class Assembler : public Malloced {
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
@ -951,8 +957,9 @@ class Assembler : public Malloced {
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Use --code-comments to enable, or provide "force = true" flag to always
// write a comment.
void RecordComment(const char* msg, bool force = false);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
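A usage sketch of the new force flag (assuming a MacroAssembler* masm in scope): forced comments are written into the reloc info unconditionally, which the lithium codegen below relies on to reserve patchable reloc space for lazy deoptimization.

masm->RecordComment("[ filler for deopt patching", true);  // always recorded
masm->RecordComment("debug-only annotation");              // only with --code-comments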

15
deps/v8/src/ia32/builtins-ia32.cc

@ -589,6 +589,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &shift_arguments);
// Compute the receiver in non-strict mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &convert_to_object);
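The bit test above skips receiver coercion for strict-mode callees, matching ES5 semantics: only classic-mode functions get null/undefined replaced by the global object and primitive receivers boxed. Roughly, in C++ pseudocode (the accessor names here are illustrative):

if (!callee_shared_info->is_strict_mode()) {      // illustrative accessor
  if (receiver->IsNull() || receiver->IsUndefined()) {
    receiver = global_receiver;                   // substitute the global object
  } else if (!receiver->IsJSObject()) {
    receiver = ToObject(receiver);                // box primitive receivers
  }
}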
@ -736,6 +743,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ mov(ebx, Operand(ebp, 3 * kPointerSize));
// Do not transform the receiver for strict mode functions.
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &push_receiver);
// Compute the receiver in non-strict mode.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
__ cmp(ebx, Factory::null_value());

223
deps/v8/src/ia32/code-stubs-ia32.cc

@ -3887,7 +3887,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::regexp_entry_native, 1);
static const int kRegExpExecuteArguments = 7;
__ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
__ EnterApiExitFrame(kRegExpExecuteArguments);
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
@ -3932,7 +3932,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(edx, kRegExpExecuteArguments);
__ call(Operand(edx));
// Drop arguments and come back to JS mode.
__ LeaveApiExitFrame();
// Check the result.
Label success;
@ -3949,12 +3952,30 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax,
__ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
__ cmp(eax, Operand::StaticVariable(pending_exception));
__ mov(eax, Operand::StaticVariable(pending_exception));
__ cmp(edx, Operand(eax));
__ j(equal, &runtime);
// For exception, throw the exception again.
// Clear the pending exception variable.
__ mov(Operand::StaticVariable(pending_exception), edx);
// Special handling of termination exceptions which are uncatchable
// by javascript code.
__ cmp(eax, Factory::termination_exception());
Label throw_termination_exception;
__ j(equal, &throw_termination_exception);
// Handle normal exception by following handler chain.
__ Throw(eax);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(TERMINATION, eax);
__ bind(&failure);
// For failure and exception return null.
// For failure to match, return null.
__ mov(Operand(eax), Factory::null_value());
__ ret(4 * kPointerSize);
@ -4628,34 +4649,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception.
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
ExternalReference handler_address(Top::k_handler_address);
__ mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
__ pop(edx); // Remove state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
__ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
NearLabel skip;
__ cmp(ebp, 0);
__ j(equal, &skip, not_taken);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
__ Throw(eax);
}
@ -4723,6 +4717,23 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ test(ecx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned, not_taken);
ExternalReference pending_exception_address(Top::k_pending_exception_address);
// Check that there is no pending exception, otherwise we
// should have returned some failure value.
if (FLAG_debug_code) {
__ push(edx);
__ mov(edx, Operand::StaticVariable(
ExternalReference::the_hole_value_location()));
NearLabel okay;
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use Check here as it attempts to generate a call into the runtime.
__ j(equal, &okay);
__ int3();
__ bind(&okay);
__ pop(edx);
}
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(save_doubles_);
__ ret(0);
@ -4741,7 +4752,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
ExternalReference pending_exception_address(Top::k_pending_exception_address);
__ mov(eax, Operand::StaticVariable(pending_exception_address));
__ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@ -4762,52 +4772,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler.
ExternalReference handler_address(Top::k_handler_address);
__ mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
NearLabel loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
__ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
__ mov(esp, Operand(esp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
__ mov(eax, false);
__ mov(Operand::StaticVariable(external_caught), eax);
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
__ mov(Operand::StaticVariable(pending_exception), eax);
}
// Clear the context pointer.
__ Set(esi, Immediate(0));
// Restore fp from handler and discard handler state.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
__ pop(edx); // State.
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
__ ThrowUncatchable(type, eax);
}
@ -6543,9 +6508,19 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
__ mov(untagged_key, key);
__ SmiUntag(untagged_key);
// Verify that the receiver has pixel array elements.
__ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
// By passing NULL as not_pixel_array, callers signal that they have already
// verified that the receiver has pixel array elements.
if (not_pixel_array != NULL) {
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
} else {
if (FLAG_debug_code) {
// Map check should have already made sure that elements is a pixel array.
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ Assert(equal, "Elements isn't a pixel array");
}
}
// Key must be in range.
__ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
@ -6559,6 +6534,90 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
}
// Stores an indexed element into a pixel array, clamping the stored value.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register scratch1,
bool load_elements_from_receiver,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged unless the
// store succeeds.
// key - holds the key (must be a smi) and is unchanged.
// value - holds the value (must be a smi) and is unchanged.
// elements - holds the element object of the receiver on entry if
// load_elements_from_receiver is false, otherwise used
// internally to store the pixel array's elements and
// external array pointer.
//
// receiver, key and value remain unmodified until it's guaranteed that the
// store will succeed.
Register external_pointer = elements;
Register untagged_key = scratch1;
Register untagged_value = receiver; // Only set once success guaranteed.
// Fetch the receiver's elements if the caller hasn't already done so.
if (load_elements_from_receiver) {
__ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
}
// By passing NULL as not_pixel_array, callers signal that they have already
// verified that the receiver has pixel array elements.
if (not_pixel_array != NULL) {
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
} else {
if (FLAG_debug_code) {
// Map check should have already made sure that elements is a pixel array.
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ Assert(equal, "Elements isn't a pixel array");
}
}
// Some callers have already verified that the key is a smi. key_not_smi is
// set to NULL as a sentinel for that case. Otherwise, an explicit check is
// generated to ensure that the key is a smi.
if (key_not_smi != NULL) {
__ JumpIfNotSmi(key, key_not_smi);
} else {
if (FLAG_debug_code) {
__ AbortIfNotSmi(key);
}
}
// Key must be a smi and it must be in range.
__ mov(untagged_key, key);
__ SmiUntag(untagged_key);
__ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
__ j(above_equal, out_of_range); // unsigned check handles negative keys.
// Value must be a smi.
__ JumpIfNotSmi(value, value_not_smi);
__ mov(untagged_value, value);
__ SmiUntag(untagged_value);
{ // Clamp the value to [0..255].
NearLabel done;
__ test(untagged_value, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
__ dec_b(untagged_value); // 0 if negative, 255 if positive.
__ bind(&done);
}
__ mov(external_pointer,
FieldOperand(elements, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
__ ret(0); // Return value in eax.
}
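The setcc/dec_b pair above clamps the untagged value to [0, 255] without branches: setcc(negative) yields 1 for negative inputs and 0 for inputs of 256 or more, and decrementing the byte turns that into 0 or 255 respectively. Equivalent portable C++ (a sketch of the trick, not V8 code):

#include <cstdint>

uint8_t ClampToByte(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);  // in range
  uint8_t is_negative = value < 0 ? 1 : 0;       // setcc(negative)
  return static_cast<uint8_t>(is_negative - 1);  // dec_b: 0 or 255 (byte wrap)
}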
#undef __
} } // namespace v8::internal

38
deps/v8/src/ia32/code-stubs-ia32.h

@ -490,14 +490,14 @@ class NumberToStringStub: public CodeStub {
};
// Generate code the to load an element from a pixel array. The receiver is
// assumed to not be a smi and to have elements, the caller must guarantee this
// precondition. If the receiver does not have elements that are pixel arrays,
// the generated code jumps to not_pixel_array. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated . If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range.
// Generate code to load an element from a pixel array. The receiver is assumed
// to not be a smi and to have elements, the caller must guarantee this
// precondition. If key is not a smi, then the generated code branches to
// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
// check has already been performed on key so that the smi check is not
// generated. If key is not a valid index within the bounds of the pixel array,
// the generated code jumps to out_of_range. receiver, key and elements are
// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@ -508,6 +508,28 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Label* key_not_smi,
Label* out_of_range);
// Generate code to store an element into a pixel array, clamping values between
// [0..255]. The receiver is assumed to not be a smi and to have elements, the
// caller must guarantee this precondition. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated. If the value is not a smi, the generated
// code will branch to value_not_smi. If the receiver doesn't have pixel array
// elements, the generated code will branch to not_pixel_array, unless
// not_pixel_array is NULL, in which case the caller must ensure that the
// receiver has pixel array elements. If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register scratch1,
bool load_elements_from_receiver,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range);
} } // namespace v8::internal

27
deps/v8/src/ia32/codegen-ia32.cc

@ -3771,14 +3771,15 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@ -5587,7 +5588,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
if (property->emit_store()) {
Result ignored =
frame_->CallStoreIC(Handle<String>::cast(key), false);
frame_->CallStoreIC(Handle<String>::cast(key), false,
strict_mode_flag());
// A test eax instruction following the store IC call would
// indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such
@ -8223,19 +8225,24 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
Load(property->obj());
Load(property->key());
Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
frame_->Push(Smi::FromInt(strict_mode_flag()));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
}
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
frame_->Push(Smi::FromInt(kNonStrictMode));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 2);
CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
@ -9670,7 +9677,7 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
result = frame()->CallStoreIC(name, is_contextual);
result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// A test eax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
@ -9754,7 +9761,7 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
slow.Bind(&value, &receiver);
frame()->Push(&receiver);
frame()->Push(&value);
result = frame()->CallStoreIC(name, is_contextual);
result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// Encode the offset to the map check instruction and the offset
// to the write barrier store address computation in a test eax
// instruction.

58
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -80,6 +80,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Address prev_address = code_start_address;
for (unsigned i = 0; i < table.length(); ++i) {
Address curr_address = code_start_address + table.GetPcOffset(i);
ASSERT_GE(curr_address, prev_address);
ZapCodeRange(prev_address, curr_address);
SafepointEntry safepoint_entry = table.GetEntry(i);
@ -97,7 +98,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
curr_address += patch_size();
}
prev_address = curr_address;
@ -137,39 +139,39 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kPointerSize;
ASSERT(check_code->entry() ==
Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp esp, <limit>
// jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
Address call_target_address = pc_after - kIntSize;
ASSERT(check_code->entry() ==
Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp esp, <limit>
// jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
}
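The kPointerSize to kIntSize change is behavior-neutral on ia32 (both are 4 bytes) but names the right quantity: what gets patched is the 32-bit immediate of the call instruction, an int-sized field rather than a pointer-sized one.

#include <cstdint>

static_assert(sizeof(int32_t) == 4, "ia32 call immediates are 4-byte ints");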
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kPointerSize;
Address call_target_address = pc_after - kIntSize;
ASSERT(replacement_code->entry() ==
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to

12
deps/v8/src/ia32/disasm-ia32.cc

@ -1115,10 +1115,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
NameOfXMMRegister(regop),
NameOfCPURegister(regop),
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pinsrd %s,%s,%d",
NameOfXMMRegister(regop),
NameOfCPURegister(rm),
static_cast<int>(imm8));
data += 2;
} else {
UnimplementedInstruction();
}

133
deps/v8/src/ia32/full-codegen-ia32.cc

@ -47,8 +47,7 @@ namespace internal {
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm)
: masm_(masm) {
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
@ -60,7 +59,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
EmitJump(not_carry, target); // Always taken before patched.
EmitJump(not_carry, target); // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, NearLabel* target) {
@ -310,12 +309,14 @@ void FullCodeGenerator::EmitReturnSequence() {
// patch with the code required by the debugger.
__ mov(esp, ebp);
__ pop(ebp);
__ ret((scope()->num_parameters() + 1) * kPointerSize);
int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
@ -330,6 +331,7 @@ FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
} else if (right->IsSmiLiteral()) {
return kRightConstant;
} else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
// Don't inline shifts with constant left hand side.
return kLeftConstant;
} else {
return kNoConstants;
@ -614,6 +616,7 @@ void FullCodeGenerator::Move(Slot* dst,
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
int offset = Context::SlotOffset(dst->index());
ASSERT(!scratch1.is(esi) && !src.is(esi) && !scratch2.is(esi));
__ RecordWrite(scratch1, offset, src, scratch2);
}
}
@ -717,18 +720,25 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForStackValue(prop->obj());
// property. Use (keyed) IC to set the initial value. We cannot
// visit the rewrite because it's shared and we risk recording
// duplicate AST IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
if (function != NULL) {
VisitForStackValue(prop->key());
__ push(eax);
VisitForAccumulatorValue(function);
__ pop(ecx);
__ pop(edx);
} else {
VisitForAccumulatorValue(prop->key());
__ mov(ecx, result_register());
__ mov(result_register(), Factory::the_hole_value());
__ mov(edx, eax);
__ mov(eax, Factory::the_hole_value());
}
__ pop(edx);
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@ -1635,6 +1645,9 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
// Optimistically add the smi value to the unknown object. If the result
// overflows or is not a smi, then we either had a smi overflow or added a
// smi to a tagged pointer.
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
JumpPatchSite patch_site(masm_);
@ -1643,8 +1656,7 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
// Undo the optimistic add operation and call the shared stub.
__ bind(&call_stub);
__ sub(Operand(eax), Immediate(value));
Token::Value op = Token::ADD;
TypeRecordingBinaryOpStub stub(op, mode);
TypeRecordingBinaryOpStub stub(Token::ADD, mode);
if (left_is_constant_smi) {
__ mov(edx, Immediate(value));
} else {
@ -1663,6 +1675,9 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
// Optimistically subtract the smi value from the unknown object. If the
// result overflows or is not a smi, then we either had a smi overflow or
// subtracted a smi from a tagged pointer.
if (left_is_constant_smi) {
__ mov(ecx, eax);
__ mov(eax, Immediate(value));
@ -1683,8 +1698,7 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
Token::Value op = Token::SUB;
TypeRecordingBinaryOpStub stub(op, mode);
TypeRecordingBinaryOpStub stub(Token::SUB, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
@ -1720,7 +1734,7 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
__ shl(edx, shift_value - 1);
}
// Convert int result to smi, checking that it is in int range.
ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
STATIC_ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
__ add(edx, Operand(edx));
__ j(overflow, &call_stub);
__ mov(eax, edx); // Put result back into eax.
@ -1733,6 +1747,8 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
}
break;
case Token::SHR:
// SHR must return a positive value. When shifting by 0 or 1 we need to
// check that smi tagging the result will not create a negative value.
if (shift_value < 2) {
__ mov(edx, eax);
__ SmiUntag(edx);
@ -1975,10 +1991,20 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
if (prop->is_synthetic()) {
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
__ mov(edx, eax);
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
}
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@ -2004,8 +2030,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// ecx, and the global object on the stack.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@ -3700,37 +3728,47 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (prop == NULL && var == NULL) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
context()->Plug(false);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else if (var->is_global()) {
context()->Plug(eax);
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
if (var->is_global()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(kNonStrictMode)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Non-global variable. Call the runtime to delete from the
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(eax);
}
context()->Plug(eax);
} else {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
}
break;
}
@ -3949,8 +3987,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
TypeRecordingBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE);
TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);

49
deps/v8/src/ia32/ic-ia32.cc

@ -108,6 +108,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Register name,
Register r0,
Register r1) {
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@ -806,28 +809,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ecx: key (a smi)
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(ecx, eax); // Save the value. Key is not longer needed.
__ SmiUntag(ecx);
{ // Clamp the value to [0..255].
Label done;
__ test(ecx, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, ecx); // 1 if negative, 0 if positive.
__ dec_b(ecx); // 0 if negative, 255 if positive.
__ bind(&done);
}
__ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
__ ret(0); // Return value in eax.
GenerateFastPixelArrayStore(masm,
edx,
ecx,
eax,
edi,
ebx,
false,
NULL,
&slow,
&slow,
&slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@ -1208,7 +1200,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Check if the name is a string.
Label miss;
__ test(ecx, Immediate(kSmiTagMask));
__ j(zero, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@ -1488,7 +1487,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@ -1498,7 +1498,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
MONOMORPHIC,
extra_ic_state);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.

332
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -43,13 +43,20 @@ class SafepointGenerator : public PostCallGenerator {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
int deoptimization_index)
int deoptimization_index,
bool ensure_reloc_space = false)
: codegen_(codegen),
pointers_(pointers),
deoptimization_index_(deoptimization_index) { }
deoptimization_index_(deoptimization_index),
ensure_reloc_space_(ensure_reloc_space) { }
virtual ~SafepointGenerator() { }
virtual void Generate() {
// Ensure that we have enough space in the reloc info to patch
// this with calls when doing deoptimization.
if (ensure_reloc_space_) {
codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
}
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
@ -57,6 +64,7 @@ class SafepointGenerator : public PostCallGenerator {
LCodeGen* codegen_;
LPointerMap* pointers_;
int deoptimization_index_;
bool ensure_reloc_space_;
};
@ -157,6 +165,8 @@ bool LCodeGen::GeneratePrologue() {
// Trace the call.
if (FLAG_trace) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@ -367,10 +377,14 @@ void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
LInstruction* instr,
bool adjusted) {
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
if (!adjusted) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ call(code, mode);
RegisterLazyDeoptimization(instr);
@ -383,15 +397,19 @@ void LCodeGen::CallCode(Handle<Code> code,
}
void LCodeGen::CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr) {
void LCodeGen::CallRuntime(Runtime::Function* fun,
int argc,
LInstruction* instr,
bool adjusted) {
ASSERT(instr != NULL);
ASSERT(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ CallRuntime(function, num_arguments);
if (!adjusted) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ CallRuntime(fun, argc);
RegisterLazyDeoptimization(instr);
}
@ -568,10 +586,6 @@ void LCodeGen::RecordSafepoint(
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
if (kind & Safepoint::kWithRegisters) {
// Register esi always contains a pointer to the context.
safepoint.DefinePointerRegister(esi);
}
}
@ -635,6 +649,7 @@ void LCodeGen::DoParameter(LParameter* instr) {
void LCodeGen::DoCallStub(LCallStub* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
@ -804,7 +819,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ test(left, Operand(left));
__ j(not_zero, &done);
if (right->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@ -945,19 +960,31 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
if (BitCast<uint64_t, double>(v) == 0) {
__ xorpd(res, res);
} else {
int32_t v_int32 = static_cast<int32_t>(v);
if (static_cast<double>(v_int32) == v) {
__ push_imm32(v_int32);
__ cvtsi2sd(res, Operand(esp, 0));
__ add(Operand(esp), Immediate(kPointerSize));
Register temp = ToRegister(instr->TempAt(0));
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(res, Operand(temp));
__ Set(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
} else {
__ xorpd(res, res);
__ Set(temp, Immediate(upper));
__ pinsrd(res, Operand(temp), 1);
}
} else {
uint64_t int_val = BitCast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
__ push_imm32(upper);
__ push_imm32(lower);
__ movdbl(res, Operand(esp, 0));
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ Set(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(xmm0, Operand(temp));
__ por(res, xmm0);
}
}
}
}
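Both paths above materialize the double from its two 32-bit halves: with SSE4.1, movd writes the low word and pinsrd inserts the high word; without it, the high word is shifted into place with psllq and OR'ed with the low word. The equivalent bit construction in portable C++ (a sketch):

#include <cstdint>
#include <cstring>

double DoubleFromHalves(uint32_t lower, uint32_t upper) {
  uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
  double result;
  std::memcpy(&result, &bits, sizeof result);  // same bit-cast as BitCast<double>
  return result;
}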
@ -983,6 +1010,13 @@ void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
}
void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(array, PixelArray::kLengthOffset));
}
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
@ -1011,7 +1045,7 @@ void LCodeGen::DoBitNotI(LBitNotI* instr) {
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->InputAt(0)));
CallRuntime(Runtime::kThrow, 1, instr);
CallRuntime(Runtime::kThrow, 1, instr, false);
if (FLAG_debug_code) {
Comment("Unreachable code.");
@ -1083,7 +1117,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@ -1196,6 +1230,7 @@ void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
__ pushad();
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@ -1686,6 +1721,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@ -1701,6 +1737,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@ -1735,11 +1772,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register object = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
// A Smi is not instance of anything.
// A Smi is not an instance of anything.
__ test(object, Immediate(kSmiTagMask));
__ j(zero, &false_result, not_taken);
// This is the inlined call site instanceof cache. The two occourences of the
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
NearLabel cache_miss;
@ -1751,10 +1788,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
__ jmp(&done);
// The inlined call site cache did not match. Check null and string before
// calling the deferred code.
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss);
// Null is not instance of anything.
// Null is not an instance of anything.
__ cmp(object, Factory::null_value());
__ j(equal, &false_result);
@ -1794,12 +1831,13 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(edi));
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 13;
static const int kAdditionalDelta = 16;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->SizeOfCodeGeneratedSince(&before_push_delta));
@ -1836,7 +1874,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
@ -1859,7 +1897,7 @@ void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
// The compare stub expects compare condition and the input operands
// reversed for GT and LTE.
@ -1874,14 +1912,17 @@ void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Preserve the return value on the stack and rely on the runtime
// call to return the value in the same register.
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, so it's
// safe to write to the context register.
__ push(eax);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
__ mov(esp, ebp);
__ pop(ebp);
__ ret((ParameterCount() + 1) * kPointerSize);
__ Ret((ParameterCount() + 1) * kPointerSize, ecx);
}
@ -1945,6 +1986,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
@ -1997,22 +2039,33 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
ASSERT(instr->result()->Equals(instr->InputAt(0)));
Register reg = ToRegister(instr->InputAt(0));
__ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
NearLabel done;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ j(equal, &done);
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ j(equal, &done);
__ cmp(FieldOperand(result, HeapObject::kMapOffset),
Immediate(Factory::fixed_cow_array_map()));
__ Check(equal, "Check for fast elements failed.");
__ Check(equal, "Check for fast elements or pixel array failed.");
__ bind(&done);
}
}
void LCodeGen::DoLoadPixelArrayExternalPointer(
LLoadPixelArrayExternalPointer* instr) {
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
@ -2035,7 +2088,10 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
ASSERT(result.is(elements));
// Load the result.
__ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
__ mov(result, FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
// Check for the hole value.
__ cmp(result, Factory::the_hole_value());
@ -2043,7 +2099,19 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
}
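// times_pointer_size is an alias for times_4 on ia32 (kPointerSize == 4);
// spelling it this way documents that the key is scaled by the pointer
// size. For example, element i of a FixedArray lives at
// elements + i * kPointerSize + FixedArray::kHeaderSize - kHeapObjectTag.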
void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
Register external_elements = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register result = ToRegister(instr->result());
ASSERT(result.is(external_elements));
// Load the result.
__ movzx_b(result, Operand(external_elements, key, times_1, 0));
}
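// Pixel array elements are raw unsigned bytes (values clamped to 0..255 on
// store), so a single movzx_b zero-extends the loaded byte into the 32-bit
// result register; no smi tagging or hole check is needed here.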
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(eax));
@ -2101,24 +2169,36 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
ASSERT(ToRegister(instr->function()).is(edi));
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = ToRegister(instr->TempAt(0));
ASSERT(receiver.is(eax)); // Used for parameter count.
ASSERT(function.is(edi)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(eax));
// If the receiver is null or undefined, we have to pass the
// global object as a receiver.
NearLabel global_receiver, receiver_ok;
// If the receiver is null or undefined, we have to pass the global object
// as a receiver.
NearLabel global_object, receiver_ok;
__ cmp(receiver, Factory::null_value());
__ j(equal, &global_receiver);
__ j(equal, &global_object);
__ cmp(receiver, Factory::undefined_value());
__ j(not_equal, &receiver_ok);
__ bind(&global_receiver);
__ mov(receiver, GlobalObjectOperand());
__ bind(&receiver_ok);
__ j(equal, &global_object);
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Label invoke;
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
DeoptimizeIf(equal, instr->environment());
__ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
DeoptimizeIf(below, instr->environment());
__ jmp(&receiver_ok);
__ bind(&global_object);
// TODO(kmillikin): We have a hydrogen value for the global object. See
// if it's better to use it than to explicitly fetch it from the context
// here.
__ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
__ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
__ bind(&receiver_ok);
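// This implements the receiver coercion Function.prototype.apply performs
// for non-strict targets: null and undefined become the global object,
// smis and anything below FIRST_JS_OBJECT_TYPE deoptimize to the general
// case, and any other receiver is passed through unchanged.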
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@ -2131,7 +2211,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Loop through the arguments pushing them onto the execution
// stack.
Label loop;
NearLabel invoke, loop;
// length is a small non-negative integer, due to the test above.
__ test(length, Operand(length));
__ j(zero, &invoke);
@ -2149,13 +2229,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
RegisterEnvironmentForDeoptimization(env);
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
ASSERT(receiver.is(eax));
env->deoptimization_index(),
true);
v8::internal::ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
// Restore context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
}
@ -2171,7 +2248,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, esi);
__ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@ -2207,6 +2284,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
(scope()->num_heap_slots() > 0);
if (change_context) {
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
} else {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
// Set eax to arguments count if adaption is not needed. Assumes that eax
@ -2222,14 +2301,15 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (*function == *graph()->info()->closure()) {
__ CallSelf();
} else {
// This is an indirect call and will not be recorded in the reloc info.
// Add a comment to the reloc info in case we need to patch this during
// deoptimization.
__ RecordComment(RelocInfo::kFillerCommentString, true);
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
// Setup deoptimization.
RegisterLazyDeoptimization(instr);
// Restore context.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@ -2272,6 +2352,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@ -2483,7 +2564,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@ -2491,7 +2572,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@ -2499,7 +2580,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
@ -2537,46 +2618,46 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->InputAt(0)).is(ecx));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallNamed(LCallNamed* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ Drop(1);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
__ mov(ecx, instr->name());
CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
@ -2588,7 +2669,8 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(edi));
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
@ -2598,7 +2680,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
CallRuntime(instr->function(), instr->arity(), instr, false);
}
@ -2633,6 +2715,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->value()).is(eax));
@ -2661,19 +2744,27 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
__ mov(FieldOperand(elements, offset), value);
} else {
__ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
__ mov(FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize),
value);
}
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
__ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
__ lea(key,
FieldOperand(elements,
key,
times_pointer_size,
FixedArray::kHeaderSize));
__ RecordWrite(elements, key, value);
}
}
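// The write barrier needs the address of the slot that was written;
// reusing the key register for that address is safe because the key is
// dead after the store. RecordWrite then informs the GC of the store so
// old-to-new pointers are tracked.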
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));
@ -2809,6 +2900,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ SmiTag(index);
__ push(index);
}
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
RecordSafepointWithRegisters(
instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
@ -2886,6 +2978,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// integer value.
__ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@ -2933,6 +3026,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ Set(reg, Immediate(0));
__ PushSafepointRegisters();
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@ -3348,21 +3442,22 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
} else if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
} else {
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
}
}
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
// Setup the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
@ -3400,7 +3495,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
__ push(Immediate(instr->hydrogen()->pattern()));
__ push(Immediate(instr->hydrogen()->flags()));
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
__ mov(ebx, eax);
__ bind(&materialized);
@ -3412,7 +3507,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&runtime_allocate);
__ push(ebx);
__ push(Immediate(Smi::FromInt(size)));
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
__ pop(ebx);
__ bind(&allocated);
@ -3439,14 +3534,14 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (shared_info->num_literals() == 0 && !pretenure) {
FastNewClosureStub stub;
__ push(Immediate(shared_info));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
} else {
__ push(esi);
__ push(Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(shared_info));
__ push(Immediate(pretenure
? Factory::true_value()
: Factory::false_value()));
CallRuntime(Runtime::kNewClosure, 3, instr);
CallRuntime(Runtime::kNewClosure, 3, instr, false);
}
}
@ -3458,7 +3553,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
} else {
__ push(ToOperand(input));
}
CallRuntime(Runtime::kTypeof, 1, instr);
CallRuntime(Runtime::kTypeof, 1, instr, false);
}
@ -3577,6 +3672,53 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
NearLabel true_label;
NearLabel false_label;
NearLabel done;
EmitIsConstructCall(result);
__ j(equal, &true_label);
__ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&true_label);
__ mov(result, Factory::true_value());
__ bind(&done);
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp);
EmitBranch(true_block, false_block, equal);
}
void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
__ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
NearLabel check_frame_marker;
__ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &check_frame_marker);
__ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}
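// Frame layout makes this test cheap: a construct frame stores
// Smi::FromInt(StackFrame::CONSTRUCT) in its marker slot, so one walk past
// any arguments adaptor frame plus one compare suffices. Illustrative
// JS-level behavior (with --allow-natives-syntax):
//   function F() { return %_IsConstructCall(); }
//   F();      // false
//   new F();  // true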
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@ -3602,9 +3744,15 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LEnvironment* env = instr->deoptimization_environment();
RecordPosition(pointers->position());
RegisterEnvironmentForDeoptimization(env);
// Create a safepoint generator that will also ensure enough space in the
// reloc info for patching in deoptimization (since this is invoking a
// builtin).
SafepointGenerator safepoint_generator(this,
pointers,
env->deoptimization_index());
env->deoptimization_index(),
true);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}
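// The strict mode flag pushed above lets the DELETE builtin follow ES5
// semantics: deleting a non-configurable property returns false in sloppy
// mode but throws a TypeError in strict mode.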
@ -3617,7 +3765,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ j(above_equal, &done);
StackCheckStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
__ bind(&done);
}

26
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -120,6 +120,10 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
return info_->is_strict() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@ -149,17 +153,14 @@ class LCodeGen BASE_EMBEDDED {
bool GenerateDeferredCode();
bool GenerateSafepointTable();
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallRuntime(Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
bool adjusted = true);
void CallRuntime(Runtime::Function* fun, int argc, LInstruction* instr,
bool adjusted = true);
void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
bool adjusted = true) {
Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
CallRuntime(function, argc, instr, adjusted);
}
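// The new "adjusted" default parameter records whether esi already holds
// the current context at the call site. Judging by the call sites in this
// patch, passing false appears to tell the helper not to assume esi is the
// context (the context restore that used to follow these calls has been
// dropped, and callers that need esi now reload it from the frame
// explicitly).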
// Generate a direct call to a known function. Expects the function
@ -229,6 +230,11 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

133
deps/v8/src/ia32/lithium-ia32.cc

@ -74,15 +74,13 @@ void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as
// temporaries and outputs because all registers
// are blocked by the calling convention.
// Inputs can use either fixed register or have a short lifetime (be
// used at start of the instruction).
// Inputs must use a fixed register.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); it.HasNext(); it.Advance()) {
LOperand* operand = it.Next();
ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
LUnallocated::cast(operand)->IsUsedAtStart() ||
!LUnallocated::cast(operand)->HasRegisterPolicy());
}
for (TempIterator it(this); it.HasNext(); it.Advance()) {
@ -1090,14 +1088,17 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LOperand* left = UseFixed(instance_of->left(), InstanceofStub::left());
LOperand* right = UseFixed(instance_of->right(), InstanceofStub::right());
LOperand* context = UseFixed(instance_of->context(), esi);
LInstanceOfAndBranch* result =
new LInstanceOfAndBranch(
UseFixed(instance_of->left(), InstanceofStub::left()),
UseFixed(instance_of->right(), InstanceofStub::right()));
new LInstanceOfAndBranch(context, left, right);
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else if (v->IsIsConstructCall()) {
return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
@ -1132,9 +1133,10 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstanceOf* result =
new LInstanceOf(UseFixed(instr->left(), InstanceofStub::left()),
UseFixed(instr->right(), InstanceofStub::right()));
LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
LOperand* context = UseFixed(instr->context(), esi);
LInstanceOf* result = new LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1153,12 +1155,14 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), edi);
LOperand* receiver = UseFixed(instr->receiver(), eax);
LOperand* length = UseRegisterAtStart(instr->length());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* length = UseFixed(instr->length(), ebx);
LOperand* elements = UseFixed(instr->elements(), ecx);
LOperand* temp = FixedTemp(edx);
LApplyArguments* result = new LApplyArguments(function,
receiver,
length,
elements);
elements,
temp);
return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@ -1230,21 +1234,27 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
LOperand* context = UseFixed(instr->context(), esi);
LOperand* key = UseFixed(instr->key(), ecx);
return MarkAsCall(DefineFixed(new LCallKeyed(key), eax), instr);
argument_count_ -= instr->argument_count();
LCallKeyed* result = new LCallKeyed(context, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallNamed, eax), instr);
LCallNamed* result = new LCallNamed(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallGlobal, eax), instr);
LCallGlobal* result = new LCallGlobal(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1255,16 +1265,19 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* constructor = UseFixed(instr->constructor(), edi);
argument_count_ -= instr->argument_count();
LCallNew* result = new LCallNew(constructor);
LCallNew* result = new LCallNew(context, constructor);
return MarkAsCall(DefineFixed(result, eax), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallFunction, eax), instr);
LCallFunction* result = new LCallFunction(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1320,9 +1333,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* dividend = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
LDivI* result = new LDivI(value, divisor, temp);
LDivI* result = new LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
ASSERT(instr->representation().IsTagged());
@ -1508,6 +1521,13 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
}
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
Abort("Unimplemented: %s", "DoGetCachedArrayIndex");
return NULL;
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
@ -1537,6 +1557,12 @@ LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
}
LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LPixelArrayLength(array));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
@ -1672,13 +1698,15 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
int32_t value = instr->Integer32Value();
return DefineAsRegister(new LConstantI(value));
return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
return DefineAsRegister(new LConstantD(value));
LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
? TempRegister()
: NULL;
return DefineAsRegister(new LConstantD(temp));
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
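// A temp register is only requested for nonzero doubles: +0.0 is the one
// value whose 64-bit pattern is all zeros (hence the BitCast test), so it
// can be materialized by xorps-ing the destination with itself, while any
// other constant presumably has to be assembled from its bit pattern
// through the general-purpose temp.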
@ -1731,8 +1759,9 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), eax);
LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1747,7 +1776,14 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineSameAsFirst(new LLoadElements(input));
return DefineAsRegister(new LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
HLoadPixelArrayExternalPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
@ -1762,11 +1798,25 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
}
LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
HLoadPixelArrayElement* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer =
UseRegisterAtStart(instr->external_pointer());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadPixelArrayElement* result =
new LLoadPixelArrayElement(external_pointer, key);
return DefineSameAsFirst(result);
}
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), eax);
LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1791,15 +1841,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), edx);
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
LOperand* key = UseFixed(instr->key(), ecx);
LOperand* val = UseFixed(instr->value(), eax);
LOperand* value = UseFixed(instr->value(), eax);
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
LStoreKeyedGeneric* result =
new LStoreKeyedGeneric(context, object, key, value);
return MarkAsCall(result, instr);
}
@ -1825,10 +1878,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), edx);
LOperand* val = UseFixed(instr->value(), eax);
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
LOperand* value = UseFixed(instr->value(), eax);
LStoreNamedGeneric* result = new LStoreNamedGeneric(obj, val);
LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr);
}
@ -1853,7 +1907,8 @@ LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
return MarkAsCall(DefineFixed(new LObjectLiteral, eax), instr);
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
}
@ -1894,8 +1949,10 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LOperand* context = UseFixed(instr->context(), esi);
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallStub, eax), instr);
LCallStub* result = new LCallStub(context);
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1925,6 +1982,12 @@ LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
return DefineAsRegister(new LIsConstructCall);
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);

234
deps/v8/src/ia32/lithium-ia32.h

@ -41,7 +41,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
@ -112,6 +111,8 @@ class LCodeGen;
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@ -123,6 +124,8 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@ -132,6 +135,7 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@ -424,11 +428,17 @@ class LParameter: public LTemplateInstruction<1, 0, 0> {
};
class LCallStub: public LTemplateInstruction<1, 0, 0> {
class LCallStub: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallStub(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
LOperand* context() { return inputs_[0]; }
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
@ -460,16 +470,18 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
};
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
LOperand* elements) {
LOperand* elements,
LOperand* temp) {
inputs_[0] = function;
inputs_[1] = receiver;
inputs_[2] = length;
inputs_[3] = elements;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@ -755,6 +767,24 @@ class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
};
class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
class LClassOfTest: public LTemplateInstruction<1, 1, 1> {
public:
LClassOfTest(LOperand* value, LOperand* temp) {
@ -813,25 +843,31 @@ class LCmpTAndBranch: public LControlInstruction<2, 0> {
};
class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
class LInstanceOf: public LTemplateInstruction<1, 3, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
LOperand* context() { return inputs_[0]; }
};
class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
class LInstanceOfAndBranch: public LControlInstruction<3, 0> {
public:
LInstanceOfAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
LInstanceOfAndBranch(LOperand* context, LOperand* left, LOperand* right) {
inputs_[0] = context;
inputs_[1] = left;
inputs_[2] = right;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
LOperand* context() { return inputs_[0]; }
};
@ -913,44 +949,34 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
class LConstant: public LTemplateInstruction<1, 0, 0> {
DECLARE_INSTRUCTION(Constant)
};
class LConstantI: public LConstant {
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantI(int32_t value) : value_(value) { }
int32_t value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
int32_t value_;
int32_t value() const { return hydrogen()->Integer32Value(); }
};
class LConstantD: public LConstant {
class LConstantD: public LTemplateInstruction<1, 0, 1> {
public:
explicit LConstantD(double value) : value_(value) { }
double value() const { return value_; }
explicit LConstantD(LOperand* temp) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
double value_;
double value() const { return hydrogen()->DoubleValue(); }
};
class LConstantT: public LConstant {
class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantT(Handle<Object> value) : value_(value) { }
Handle<Object> value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
Handle<Object> value_;
Handle<Object> value() const { return hydrogen()->handle(); }
};
@ -999,6 +1025,17 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LPixelArrayLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
};
class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
@ -1123,16 +1160,18 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
class LLoadNamedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
LLoadNamedGeneric(LOperand* context, LOperand* object) {
inputs_[0] = context;
inputs_[1] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
LOperand* object() { return inputs_[0]; }
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@ -1161,6 +1200,17 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadPixelArrayExternalPointer(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
"load-pixel-array-external-pointer")
};
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
@ -1176,20 +1226,38 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load-pixel-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
LOperand* object() { return inputs_[0]; }
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
inputs_[0] = context;
inputs_[1] = obj;
inputs_[2] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
};
class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
@ -1308,49 +1376,68 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
LCallKeyed(LOperand* context, LOperand* key) {
inputs_[0] = context;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
LOperand* context() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallNamed: public LTemplateInstruction<1, 0, 0> {
class LCallNamed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNamed(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
virtual void PrintDataTo(StringStream* stream);
LOperand* context() { return inputs_[0]; }
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallFunction: public LTemplateInstruction<1, 0, 0> {
class LCallFunction: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallFunction(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
LOperand* context() { return inputs_[0]; }
int arity() const { return hydrogen()->argument_count() - 2; }
};
class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallGlobal(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
virtual void PrintDataTo(StringStream* stream);
LOperand* context() { return inputs_[0]; }
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -1368,10 +1455,11 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
};
class LCallNew: public LTemplateInstruction<1, 1, 0> {
class LCallNew: public LTemplateInstruction<1, 2, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
LCallNew(LOperand* context, LOperand* constructor) {
inputs_[0] = context;
inputs_[1] = constructor;
}
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
@ -1379,6 +1467,8 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
virtual void PrintDataTo(StringStream* stream);
LOperand* context() { return inputs_[0]; }
LOperand* constructor() { return inputs_[1]; }
int arity() const { return hydrogen()->argument_count() - 1; }
};
@ -1525,13 +1615,21 @@ class LStoreNamedField: public LStoreNamed {
};
class LStoreNamedGeneric: public LStoreNamed {
class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedGeneric(LOperand* obj, LOperand* val)
: LStoreNamed(obj, val) { }
LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
@ -1564,12 +1662,24 @@ class LStoreKeyedFastElement: public LStoreKeyed {
};
class LStoreKeyedGeneric: public LStoreKeyed {
class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
: LStoreKeyed(obj, key, val) { }
LStoreKeyedGeneric(LOperand* context,
LOperand* object,
LOperand* key,
LOperand* value) {
inputs_[0] = context;
inputs_[1] = object;
inputs_[2] = key;
inputs_[3] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }
LOperand* value() { return inputs_[3]; }
};
@ -1675,10 +1785,16 @@ class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
};
class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
class LObjectLiteral: public LTemplateInstruction<1, 1, 0> {
public:
explicit LObjectLiteral(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
LOperand* context() { return inputs_[0]; }
};

133
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -78,11 +78,6 @@ void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
Register scratch) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
NearLabel done;
@ -129,11 +124,6 @@ void MacroAssembler::RecordWrite(Register object,
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !address.is(esi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
@ -458,6 +448,97 @@ void MacroAssembler::PopTryHandler() {
}
void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// eax must hold the exception.
if (!value.is(eax)) {
mov(eax, value);
}
// Drop the sp to the top of the handler.
ExternalReference handler_address(Top::k_handler_address);
mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(Operand::StaticVariable(handler_address));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
pop(ebp);
pop(edx); // Remove state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
NearLabel skip;
cmp(ebp, 0);
j(equal, &skip, not_taken);
mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
bind(&skip);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
ret(0);
}
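// Both unwinders rely on the stack handler layout pinned down by the
// STATIC_ASSERTs: next handler at offset 0, saved fp at 1 * kPointerSize,
// state at 2 * kPointerSize, and the handler pc at 3 * kPointerSize, so
// popping in that order ends with ret jumping straight to the handler.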
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// eax must hold the exception.
if (!value.is(eax)) {
mov(eax, value);
}
// Drop sp to the top stack handler.
ExternalReference handler_address(Top::k_handler_address);
mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
NearLabel loop, done;
bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
j(equal, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
mov(esp, Operand(esp, kNextOffset));
jmp(&loop);
bind(&done);
// Set the top handler address to the next handler past the current ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(Operand::StaticVariable(handler_address));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
mov(eax, false);
mov(Operand::StaticVariable(external_caught), eax);
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
mov(Operand::StaticVariable(pending_exception), eax);
}
// Clear the context pointer.
Set(esi, Immediate(0));
// Restore fp from handler and discard handler state.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
pop(ebp);
pop(edx); // State.
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
ret(0);
}
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@ -604,11 +685,11 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
if (top_reg.is(result)) {
add(Operand(top_reg), Immediate(object_size));
} else {
lea(top_reg, Operand(result, object_size));
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(Operand(top_reg), Immediate(object_size));
j(carry, gc_required, not_taken);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
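// The switch from lea to mov+add is deliberate: lea computes its address
// without touching EFLAGS, so the j(carry, gc_required) overflow check
// only works when the addition is performed with add, which sets the carry
// flag on unsigned overflow. The same reasoning applies to the
// add(result_end, Operand(result)) variants below.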
@ -657,7 +738,12 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
lea(result_end, Operand(result, element_count, element_size, header_size));
// We assume that element_count*element_size + header_size does not
// overflow.
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, Operand(result));
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
@ -702,6 +788,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
mov(result_end, object_size);
}
add(result_end, Operand(result));
j(carry, gc_required, not_taken);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
@ -1196,7 +1283,7 @@ MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
// If false, it is returned as a pointer to a preallocated by caller memory
// region. Pointer to this region should be passed to a function as an
// implicit first argument.
#if defined(USING_BSD_ABI) || defined(__MINGW32__) || defined(__CYGWIN__)
#if defined(USING_BSD_ABI) || defined(__MINGW32__)
static const bool kReturnHandlesDirectly = true;
#else
static const bool kReturnHandlesDirectly = false;
@ -1581,6 +1668,20 @@ void MacroAssembler::Ret() {
}
void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
if (is_uint16(bytes_dropped)) {
ret(bytes_dropped);
} else {
pop(scratch);
add(Operand(esp), Immediate(bytes_dropped));
push(scratch);
ret(0);
}
}
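// ret's immediate operand is only 16 bits wide, so byte counts of 64KB or
// more cannot use it directly; the fallback pops the return address into
// the scratch register, adjusts esp past the arguments, and re-pushes the
// address so a plain ret(0) still returns to the caller.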
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(Operand(esp), Immediate(stack_elements * kPointerSize));

9
deps/v8/src/ia32/macro-assembler-ia32.h

@ -304,6 +304,11 @@ class MacroAssembler: public Assembler {
// Unlink the stack handler on top of the stack from the try handler chain.
void PopTryHandler();
// Activate the top handler in the try handler chain.
void Throw(Register value);
void ThrowUncatchable(UncatchableExceptionType type, Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@ -550,6 +555,10 @@ class MacroAssembler: public Assembler {
void Ret();
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);

9
deps/v8/src/ia32/simulator-ia32.h

@ -38,10 +38,15 @@ namespace internal {
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
// Call the generated regexp code directly. The entry function pointer should
typedef int (*regexp_matcher)(String*, int, const byte*,
const byte*, int*, Address, int);
// Call the generated regexp code directly. The code at the entry address should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
(entry(p0, p1, p2, p3, p4, p5, p6))
(FUNCTION_CAST<regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6))
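// Giving the entry point a real function type (regexp_matcher) instead of
// invoking it as an untyped entry means the seven arguments are checked
// against the generated code's expected signature at the call site;
// FUNCTION_CAST is V8's helper for turning an Address into a function
// pointer of the given type.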
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))

36
deps/v8/src/ia32/stub-cache-ia32.cc

@ -2709,6 +2709,42 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
JSObject* receiver) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
Label miss;
// Check that the map matches.
__ CheckMap(edx, Handle<Map>(receiver->map()), &miss, false);
// Do the store.
GenerateFastPixelArrayStore(masm(),
edx,
ecx,
eax,
edi,
ebx,
true,
&miss,
&miss,
NULL,
&miss);
// Handle store cache miss.
__ bind(&miss);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {

14
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -1033,23 +1033,31 @@ Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
}
Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
Result VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in ecx, value in eax, and receiver in edx.
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
Result value = Pop();
RelocInfo::Mode mode;
if (is_contextual) {
PrepareForCall(0, 0);
value.ToRegister(eax);
__ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
value.Unuse();
mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
Result receiver = Pop();
PrepareForCall(0, 0);
MoveResultsToRegisters(&value, &receiver, eax, edx);
mode = RelocInfo::CODE_TARGET;
}
__ mov(ecx, name);
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
return RawCallCodeObject(ic, mode);
}
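// Strict-mode stores get their own IC stub chain (StoreIC_Initialize_Strict
// and friends) because the same property write can fail silently in sloppy
// mode yet must throw in strict mode, so the two modes cannot share patched
// call sites.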

3
deps/v8/src/ia32/virtual-frame-ia32.h

@ -365,7 +365,8 @@ class VirtualFrame: public ZoneObject {
// Call store IC. If the store is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are dropped.
Result CallStoreIC(Handle<String> name, bool is_contextual);
Result CallStoreIC(Handle<String> name, bool is_contextual,
StrictModeFlag strict_mode);
// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All three are dropped.

89
deps/v8/src/ic.cc

@ -342,7 +342,10 @@ void StoreIC::ClearInlinedVersion(Address address) {
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
SetTargetAtAddress(address,
target->extra_ic_state() == kStoreICStrict
? initialize_stub_strict()
: initialize_stub());
}
@ -1368,6 +1371,7 @@ static bool LookupForWrite(JSObject* object,
MaybeObject* StoreIC::Store(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name,
Handle<Object> value) {
@ -1397,8 +1401,10 @@ MaybeObject* StoreIC::Store(State state,
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
Code* target = Builtins::builtin(Builtins::StoreIC_ArrayLength);
set_target(target);
Builtins::Name target = (extra_ic_state == kStoreICStrict)
? Builtins::StoreIC_ArrayLength_Strict
: Builtins::StoreIC_ArrayLength;
set_target(Builtins::builtin(target));
return receiver->SetProperty(*name, *value, NONE);
}
@ -1456,15 +1462,23 @@ MaybeObject* StoreIC::Store(State state,
// If no inlined store ic was patched, generate a stub for this
// store.
UpdateCaches(&lookup, state, receiver, name, value);
UpdateCaches(&lookup, state, extra_ic_state, receiver, name, value);
} else {
// Strict mode doesn't allow setting non-existent global property.
if (extra_ic_state == kStoreICStrict && IsContextual(object)) {
return ReferenceError("not_defined", name);
}
}
}
if (receiver->IsJSGlobalProxy()) {
// Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver.
if (target() != global_proxy_stub()) {
set_target(global_proxy_stub());
Code* stub = (extra_ic_state == kStoreICStrict)
? global_proxy_stub_strict()
: global_proxy_stub();
if (target() != stub) {
set_target(stub);
#ifdef DEBUG
TraceIC("StoreIC", name, state, target());
#endif
@ -1478,6 +1492,7 @@ MaybeObject* StoreIC::Store(State state,
void StoreIC::UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
@ -1498,8 +1513,8 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Object* code = NULL;
switch (type) {
case FIELD: {
maybe_code = StubCache::ComputeStoreField(*name, *receiver,
lookup->GetFieldIndex());
maybe_code = StubCache::ComputeStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, extra_ic_state);
break;
}
case MAP_TRANSITION: {
@ -1508,8 +1523,8 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
maybe_code = StubCache::ComputeStoreField(*name, *receiver,
index, *transition);
maybe_code = StubCache::ComputeStoreField(
*name, *receiver, index, *transition, extra_ic_state);
break;
}
case NORMAL: {
@ -1520,10 +1535,11 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
maybe_code = StubCache::ComputeStoreGlobal(*name, *global, cell);
maybe_code = StubCache::ComputeStoreGlobal(
*name, *global, cell, extra_ic_state);
} else {
if (lookup->holder() != *receiver) return;
maybe_code = StubCache::ComputeStoreNormal();
maybe_code = StubCache::ComputeStoreNormal(extra_ic_state);
}
break;
}
@ -1531,12 +1547,14 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->setter()) == 0) return;
maybe_code = StubCache::ComputeStoreCallback(*name, *receiver, callback);
maybe_code = StubCache::ComputeStoreCallback(
*name, *receiver, callback, extra_ic_state);
break;
}
case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
maybe_code = StubCache::ComputeStoreInterceptor(*name, *receiver);
maybe_code = StubCache::ComputeStoreInterceptor(
*name, *receiver, extra_ic_state);
break;
}
default:
@ -1552,7 +1570,11 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
set_target(Code::cast(code));
} else if (state == MONOMORPHIC) {
// Only move to megamorphic if the target changes.
if (target() != Code::cast(code)) set_target(megamorphic_stub());
if (target() != Code::cast(code)) {
set_target(extra_ic_state == kStoreICStrict
? megamorphic_stub_strict()
: megamorphic_stub());
}
} else if (state == MEGAMORPHIC) {
// Update the stub cache.
StubCache::Set(*name, receiver->map(), Code::cast(code));
@ -1610,19 +1632,25 @@ MaybeObject* KeyedStoreIC::Store(State state,
if (use_ic) {
Code* stub = generic_stub();
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
stub =
probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
} else if (state == UNINITIALIZED &&
key->IsSmi() &&
receiver->map()->has_fast_elements()) {
MaybeObject* probe = StubCache::ComputeKeyedStoreSpecialized(*receiver);
stub =
probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
if (state == UNINITIALIZED) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasPixelElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedStorePixelArray(*receiver);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
MaybeObject* probe =
StubCache::ComputeKeyedStoreSpecialized(*receiver);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
}
}
}
if (stub != NULL) set_target(stub);
@ -1795,8 +1823,9 @@ MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) {
ASSERT(args.length() == 3);
StoreIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Store(state, args.at<Object>(0), args.at<String>(1),
args.at<Object>(2));
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state, extra_ic_state, args.at<Object>(0),
args.at<String>(1), args.at<Object>(2));
}

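The new ReferenceError path implements the ES5 rule that an assignment in strict code may not create a global binding; a sketch of the contextual-store behavior this hunk encodes (run at global scope, console.log assumed):

function sloppyGlobal() { createdBySloppy = 1; }               // quietly creates a global
function strictGlobal() { 'use strict'; createdByStrict = 1; }
sloppyGlobal();
console.log(createdBySloppy);                                  // 1
try { strictGlobal(); } catch (e) {
  console.log(e instanceof ReferenceError);  // true: createdByStrict is not defined
}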
23
deps/v8/src/ic.h

@ -398,9 +398,16 @@ class KeyedLoadIC: public IC {
class StoreIC: public IC {
public:
enum StoreICStrictMode {
kStoreICNonStrict = kNonStrictMode,
kStoreICStrict = kStrictMode
};
StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }
MUST_USE_RESULT MaybeObject* Store(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name,
Handle<Object> value);
@ -408,7 +415,8 @@ class StoreIC: public IC {
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state);
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateGlobalProxy(MacroAssembler* masm);
@ -424,7 +432,9 @@ class StoreIC: public IC {
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
State state, Handle<JSObject> receiver,
State state,
Code::ExtraICState extra_ic_state,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);
@ -432,12 +442,21 @@ class StoreIC: public IC {
static Code* megamorphic_stub() {
return Builtins::builtin(Builtins::StoreIC_Megamorphic);
}
static Code* megamorphic_stub_strict() {
return Builtins::builtin(Builtins::StoreIC_Megamorphic_Strict);
}
static Code* initialize_stub() {
return Builtins::builtin(Builtins::StoreIC_Initialize);
}
static Code* initialize_stub_strict() {
return Builtins::builtin(Builtins::StoreIC_Initialize_Strict);
}
static Code* global_proxy_stub() {
return Builtins::builtin(Builtins::StoreIC_GlobalProxy);
}
static Code* global_proxy_stub_strict() {
return Builtins::builtin(Builtins::StoreIC_GlobalProxy_Strict);
}
static void Clear(Address address, Code* target);

1
deps/v8/src/liveedit.cc

@ -36,7 +36,6 @@
#include "deoptimizer.h"
#include "global-handles.h"
#include "memory.h"
#include "oprofile-agent.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"

7
deps/v8/src/macro-assembler.h

@ -50,6 +50,13 @@ enum HandlerType {
};
// Types of uncatchable exceptions.
enum UncatchableExceptionType {
OUT_OF_MEMORY,
TERMINATION
};
// Invalid depth in prototype chain.
const int kInvalidProtoDepth = -1;

2
deps/v8/src/math.js

@ -220,7 +220,7 @@ function SetupMath() {
DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetProperty($Math,
"LOG10E",
0.43429448190325176,
0.4342944819032518,
DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetProperty($Math,
"PI",

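The old literal parsed to a double one ulp away from the best approximation of log10(e); after this change the constant should print as its shortest round-trip form and agree with 1 / Math.LN10. A sketch (the final digit of the division may differ by one ulp on some platforms):

console.log(Math.LOG10E);    // 0.4342944819032518
console.log(1 / Math.LN10);  // 0.4342944819032518 (expected)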
9
deps/v8/src/messages.cc

@ -69,10 +69,13 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
Handle<String> type_handle = Factory::LookupAsciiSymbol(type);
Handle<JSArray> arguments_handle = Factory::NewJSArray(args.length());
Handle<FixedArray> arguments_elements =
Factory::NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
SetElement(arguments_handle, i, args[i]);
arguments_elements->set(i, *args[i]);
}
Handle<JSArray> arguments_handle =
Factory::NewJSArrayWithElements(arguments_elements);
int start = 0;
int end = 0;
@ -87,7 +90,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
? Factory::undefined_value()
: Handle<Object>::cast(stack_trace);
Handle<Object> stack_frames_handle = stack_frames.is_null()
? Factory::undefined_value()
: Handle<Object>::cast(stack_frames);

4
deps/v8/src/messages.js

@ -211,6 +211,7 @@ function FormatMessage(message) {
invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
strict_mode_with: ["Strict mode code may not include a with statement"],
strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
too_many_parameters: ["Too many parameters in function definition"],
strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
@ -223,6 +224,8 @@ function FormatMessage(message) {
strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
strict_reserved_word: ["Use of future reserved word in strict mode"],
strict_delete: ["Delete of an unqualified identifier in strict mode."],
strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
};
}
var message_type = %MessageGetType(message);
@ -316,6 +319,7 @@ Script.prototype.lineFromPosition = function(position) {
return i;
}
}
return -1;
}

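The two new delete messages map to a parse-time and a run-time failure respectively; a sketch of both (the exact wording comes from the templates above):

// Parse time, SyntaxError: "Delete of an unqualified identifier in strict mode."
//   'use strict'; var x; delete x;
// Run time, TypeError: "Cannot delete property 'prototype' of ...":
(function () {
  'use strict';
  try { delete Object.prototype; } catch (e) {
    console.log(e instanceof TypeError);  // true
  }
})();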
2
deps/v8/src/objects-debug.cc

@ -440,7 +440,7 @@ void JSRegExp::JSRegExpVerify() {
ASSERT(ascii_data->IsTheHole() || ascii_data->IsJSObject() ||
(is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
ASSERT(uc16_data->IsTheHole() || ascii_data->IsJSObject() ||
ASSERT(uc16_data->IsTheHole() || uc16_data->IsJSObject() ||
(is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
ASSERT(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
ASSERT(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());

22
deps/v8/src/objects-inl.h

@ -2610,10 +2610,12 @@ Code::Flags Code::ComputeFlags(Kind kind,
PropertyType type,
int argc,
InlineCacheHolderFlag holder) {
// Extra IC state is only allowed for monomorphic call IC stubs.
// Extra IC state is only allowed for monomorphic call IC stubs
// or for store IC stubs.
ASSERT(extra_ic_state == kNoExtraICState ||
(kind == CALL_IC && (ic_state == MONOMORPHIC ||
ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)));
ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
(kind == STORE_IC));
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
@ -2747,6 +2749,22 @@ MaybeObject* Map::GetSlowElementsMap() {
}
MaybeObject* Map::GetPixelArrayElementsMap() {
if (has_pixel_array_elements()) return this;
// TODO(danno): Special case empty object map (or most common case)
// to return a pre-canned pixel array map.
Object* obj;
{ MaybeObject* maybe_obj = CopyDropTransitions();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
Map* new_map = Map::cast(obj);
new_map->set_has_fast_elements(false);
new_map->set_has_pixel_array_elements(true);
Counters::map_to_pixel_array_elements.Increment();
return new_map;
}
ACCESSORS(Map, instance_descriptors, DescriptorArray,
kInstanceDescriptorsOffset)
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)

131
deps/v8/src/objects.cc

@ -531,10 +531,25 @@ MaybeObject* Object::GetProperty(Object* receiver,
MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
// Non-JS objects do not have integer indexed properties.
if (!IsJSObject()) return Heap::undefined_value();
return JSObject::cast(this)->GetElementWithReceiver(JSObject::cast(receiver),
index);
if (IsJSObject()) {
return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
}
Object* holder = NULL;
Context* global_context = Top::context()->global_context();
if (IsString()) {
holder = global_context->string_function()->instance_prototype();
} else if (IsNumber()) {
holder = global_context->number_function()->instance_prototype();
} else if (IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
} else {
// Undefined and null have no indexed properties.
ASSERT(IsUndefined() || IsNull());
return Heap::undefined_value();
}
return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
}
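Because GetElementWithReceiver now accepts any Object as receiver, an integer-indexed read on a primitive consults the corresponding wrapper prototype instead of returning undefined unconditionally; a sketch:

Number.prototype[0] = 'zero';
console.log((42)[0]);   // 'zero', found on Number.prototype
console.log(true[0]);   // undefined: Boolean.prototype has no element 0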
@ -1399,7 +1414,7 @@ MaybeObject* JSObject::AddProperty(String* name,
if (!map()->is_extensible()) {
Handle<Object> args[1] = {Handle<String>(name)};
return Top::Throw(*Factory::NewTypeError("object_not_extensible",
HandleVector(args, 1)));
}
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
@ -1707,8 +1722,9 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name,
}
bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
Object* value) {
MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
Object* value,
bool* found) {
for (Object* pt = GetPrototype();
pt != Heap::null_value();
pt = pt->GetPrototype()) {
@ -1718,15 +1734,16 @@ bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
SetElementWithCallback(element, index, value, JSObject::cast(pt));
return true;
*found = true;
return SetElementWithCallback(
dictionary->ValueAt(entry), index, value, JSObject::cast(pt));
}
}
}
return false;
*found = false;
return Heap::the_hole_value();
}
@ -2618,7 +2635,17 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
return dictionary->DeleteProperty(entry, mode);
Object* result = dictionary->DeleteProperty(entry, mode);
if (mode == STRICT_DELETION && result == Heap::false_value()) {
// In strict mode, deleting a non-configurable property throws an
// exception. dictionary->DeleteProperty will return false_value()
// if a non-configurable property is being deleted.
HandleScope scope;
Handle<Object> i = Factory::NewNumberFromUint(index);
Handle<Object> args[2] = { i, Handle<Object>(this) };
return Top::Throw(*Factory::NewTypeError("strict_delete_property",
HandleVector(args, 2)));
}
}
break;
}
@ -2657,6 +2684,13 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
if (!result.IsProperty()) return Heap::true_value();
// Ignore attributes if forcing a deletion.
if (result.IsDontDelete() && mode != FORCE_DELETION) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
HandleScope scope;
Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
return Top::Throw(*Factory::NewTypeError("strict_delete_property",
HandleVector(args, 2)));
}
return Heap::false_value();
}
// Check for interceptor.
@ -2779,6 +2813,13 @@ bool JSObject::ReferencesObject(Object* obj) {
MaybeObject* JSObject::PreventExtensions() {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return this;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->PreventExtensions();
}
// If there are fast elements we normalize.
if (HasFastElements()) {
Object* ok;
@ -6229,10 +6270,35 @@ const char* Code::PropertyType2String(PropertyType type) {
}
void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
const char* name = NULL;
switch (kind) {
case CALL_IC:
if (extra == STRING_INDEX_OUT_OF_BOUNDS) {
name = "STRING_INDEX_OUT_OF_BOUNDS";
}
break;
case STORE_IC:
if (extra == StoreIC::kStoreICStrict) {
name = "STRICT";
}
break;
default:
break;
}
if (name != NULL) {
PrintF(out, "extra_ic_state = %s\n", name);
} else {
PrintF(out, "etra_ic_state = %d\n", extra);
}
}
void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "kind = %s\n", Kind2String(kind()));
if (is_inline_cache_stub()) {
PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
PrintExtraICState(out, kind(), extra_ic_state());
PrintF(out, "ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
if (ic_state() == MONOMORPHIC) {
PrintF(out, "type = %s\n", PropertyType2String(type()));
@ -6962,9 +7028,11 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
uint32_t elms_length = static_cast<uint32_t>(elms->length());
if (check_prototype &&
(index >= elms_length || elms->get(index)->IsTheHole()) &&
SetElementWithCallbackSetterInPrototypes(index, value)) {
return value;
(index >= elms_length || elms->get(index)->IsTheHole())) {
bool found;
MaybeObject* result =
SetElementWithCallbackSetterInPrototypes(index, value, &found);
if (found) return result;
}
@ -7096,9 +7164,11 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
}
} else {
// Index not already used. Look for an accessor in the prototype chain.
if (check_prototype &&
SetElementWithCallbackSetterInPrototypes(index, value)) {
return value;
if (check_prototype) {
bool found;
MaybeObject* result =
SetElementWithCallbackSetterInPrototypes(index, value, &found);
if (found) return result;
}
// When we set the is_extensible flag to false we always force
// the element into dictionary mode (and force them to stay there).
@ -7182,7 +7252,7 @@ MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
}
MaybeObject* JSObject::GetElementPostInterceptor(JSObject* receiver,
MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
uint32_t index) {
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
@ -7239,14 +7309,14 @@ MaybeObject* JSObject::GetElementPostInterceptor(JSObject* receiver,
}
MaybeObject* JSObject::GetElementWithInterceptor(JSObject* receiver,
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
HandleScope scope;
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(receiver);
Handle<Object> this_handle(receiver);
Handle<JSObject> holder_handle(this);
if (!interceptor->getter()->IsUndefined()) {
@ -7272,7 +7342,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(JSObject* receiver,
}
MaybeObject* JSObject::GetElementWithReceiver(JSObject* receiver,
MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@ -8552,10 +8622,20 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
if (value->IsUndefined()) {
undefs++;
} else {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
// allocation. Bailout.
return Smi::FromInt(-1);
}
new_dict->AddNumberEntry(pos, value, details)->ToObjectUnchecked();
pos++;
}
} else {
if (key > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
// allocation. Bailout.
return Smi::FromInt(-1);
}
new_dict->AddNumberEntry(key, value, details)->ToObjectUnchecked();
}
}
@ -8564,6 +8644,11 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
uint32_t result = pos;
PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
// allocation. Bailout.
return Smi::FromInt(-1);
}
new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details)->
ToObjectUnchecked();
pos++;
@ -9292,7 +9377,7 @@ Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
JSObject::DeleteMode mode) {
PropertyDetails details = DetailsAt(entry);
// Ignore attributes if forcing a deletion.
if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) {
if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
return Heap::false_value();
}
SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0));

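The dictionary-elements branch above extends strict-mode delete semantics to array indices; a sketch (using Object.defineProperty to make the element non-configurable, which also pushes the array into dictionary mode — an assumption about this engine's element heuristics):

'use strict';
var a = [1, 2, 3];
Object.defineProperty(a, '0', { configurable: false });
try {
  delete a[0];                          // non-configurable element
} catch (e) {
  console.log(e instanceof TypeError);  // true: strict_delete_property
}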
66
deps/v8/src/objects.h

@ -1286,7 +1286,12 @@ class HeapNumber: public HeapObject {
// caching.
class JSObject: public HeapObject {
public:
enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
enum DeleteMode {
NORMAL_DELETION,
STRICT_DELETION,
FORCE_DELETION
};
enum ElementsKind {
// The only "fast" kind.
FAST_ELEMENTS,
@ -1541,8 +1546,8 @@ class JSObject: public HeapObject {
// Returns the index'th element.
// The undefined object if index is out of bounds.
MaybeObject* GetElementWithReceiver(JSObject* receiver, uint32_t index);
MaybeObject* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
@ -1579,7 +1584,8 @@ class JSObject: public HeapObject {
void LookupRealNamedProperty(String* name, LookupResult* result);
void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
bool SetElementWithCallbackSetterInPrototypes(uint32_t index, Object* value);
MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes(
uint32_t index, Object* value, bool* found);
void LookupCallback(String* name, LookupResult* result);
// Returns the number of properties on this object filtering out properties
@ -1798,7 +1804,7 @@ class JSObject: public HeapObject {
Object* value,
bool check_prototype);
MaybeObject* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
MaybeObject* GetElementPostInterceptor(Object* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
DeleteMode mode);
@ -3199,6 +3205,7 @@ class Code: public HeapObject {
static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
static const char* PropertyType2String(PropertyType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
inline void Disassemble(const char* name) {
Disassemble(name, stdout);
}
@ -3592,6 +3599,19 @@ class Map: public HeapObject {
return ((1 << kHasFastElements) & bit_field2()) != 0;
}
// Tells whether an instance has pixel array elements.
inline void set_has_pixel_array_elements(bool value) {
if (value) {
set_bit_field2(bit_field2() | (1 << kHasPixelArrayElements));
} else {
set_bit_field2(bit_field2() & ~(1 << kHasPixelArrayElements));
}
}
inline bool has_pixel_array_elements() {
return ((1 << kHasPixelArrayElements) & bit_field2()) != 0;
}
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
@ -3650,6 +3670,11 @@ class Map: public HeapObject {
// from the descriptors and the fast elements bit cleared.
MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
// Returns this map if the pixel array elements bit is set; otherwise
// returns a copy of the map, with all transitions dropped from the
// descriptors and the pixel array elements bit set.
MUST_USE_RESULT inline MaybeObject* GetPixelArrayElementsMap();
// Returns the property index for name (only valid for FAST MODE).
int PropertyIndexFor(String* name);
@ -3768,6 +3793,7 @@ class Map: public HeapObject {
static const int kStringWrapperSafeForDefaultValueOf = 3;
static const int kAttachedToSharedFunctionInfo = 4;
static const int kIsShared = 5;
static const int kHasPixelArrayElements = 6;
// Layout of the default cache. It holds alternating name and code objects.
static const int kCodeCacheEntrySize = 2;
@ -4344,7 +4370,6 @@ class SharedFunctionInfo: public HeapObject {
kThisPropertyAssignmentsOffset + kPointerSize,
kSize> BodyDescriptor;
private:
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
@ -4363,6 +4388,35 @@ class SharedFunctionInfo: public HeapObject {
static const int kOptimizationDisabled = 7;
static const int kStrictModeFunction = 8;
private:
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
static const int kCompilerHintsSmiTagSize = kSmiTagSize;
static const int kCompilerHintsSize = kPointerSize;
#else
// On 64 bit platforms, compiler hints is not a smi, see comment above.
static const int kCompilerHintsSmiTagSize = 0;
static const int kCompilerHintsSize = kIntSize;
#endif
public:
// Constants for optimizing codegen for strict mode function tests.
// Allows the use of byte-width instructions.
static const int kStrictModeBitWithinByte =
(kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
#if __BYTE_ORDER == __LITTLE_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif __BYTE_ORDER == __BIG_ENDIAN
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
#else
#error Unknown byte ordering
#endif
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};

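For concreteness, assuming the usual V8 configuration of kSmiTagSize = 1 on a 32-bit little-endian host: with kStrictModeFunction = 8, kStrictModeBitWithinByte = (8 + 1) % 8 = 1 and kStrictModeByteOffset = kCompilerHintsOffset + (8 + 1) / 8 = kCompilerHintsOffset + 1, so the strict-mode check compiles down to a single byte-wide test of bit 1 at that offset.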
108
deps/v8/src/oprofile-agent.cc

@ -1,108 +0,0 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "oprofile-agent.h"
namespace v8 {
namespace internal {
bool OProfileAgent::Initialize() {
#ifdef ENABLE_OPROFILE_AGENT
if (FLAG_oprofile) {
if (handle_ != NULL) return false;
// Disable code moving by GC.
FLAG_always_compact = false;
FLAG_never_compact = true;
handle_ = op_open_agent();
return (handle_ != NULL);
} else {
return true;
}
#else
if (FLAG_oprofile) {
OS::Print("Warning: --oprofile specified but binary compiled without "
"oprofile support.\n");
}
return true;
#endif
}
void OProfileAgent::TearDown() {
#ifdef ENABLE_OPROFILE_AGENT
if (handle_ != NULL) {
op_close_agent(handle_);
}
#endif
}
#ifdef ENABLE_OPROFILE_AGENT
op_agent_t OProfileAgent::handle_ = NULL;
void OProfileAgent::CreateNativeCodeRegion(const char* name,
const void* ptr, unsigned int size) {
op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
}
void OProfileAgent::CreateNativeCodeRegion(String* name,
const void* ptr, unsigned int size) {
const char* func_name;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
func_name = name->length() > 0 ? *str : "<anonymous>";
CreateNativeCodeRegion(func_name, ptr, size);
}
void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
int line_num, const void* ptr, unsigned int size) {
Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
const char* func_name;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
func_name = name->length() > 0 ? *str : "<anonymous>";
SmartPointer<char> source_str =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
func_name, *source_str, line_num) != -1) {
CreateNativeCodeRegion(buf.start(), ptr, size);
} else {
CreateNativeCodeRegion("<script/func name too long>", ptr, size);
}
}
#endif // ENABLE_OPROFILE_AGENT
} } // namespace v8::internal

48
deps/v8/src/parser.cc

@ -764,8 +764,6 @@ FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
RelocInfo::kNoPosition, type, &ok);
// Make sure the results agree.
ASSERT(ok == (result != NULL));
// The only errors should be stack overflows.
ASSERT(ok || stack_overflow_);
}
// Make sure the target stack is empty.
@ -774,8 +772,8 @@ FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
// If there was a stack overflow we have to get rid of AST and it is
// not safe to do before scope has been deleted.
if (result == NULL) {
Top::StackOverflow();
zone_scope->DeleteOnExit();
if (stack_overflow_) Top::StackOverflow();
} else {
Handle<String> inferred_name(info->inferred_name());
result->set_inferred_name(inferred_name);
@ -2523,6 +2521,16 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
}
}
// "delete identifier" is a syntax error in strict mode.
if (op == Token::DELETE && temp_scope_->StrictMode()) {
VariableProxy* operand = expression->AsVariableProxy();
if (operand != NULL && !operand->is_this()) {
ReportMessage("strict_delete", Vector<const char*>::empty());
*ok = false;
return NULL;
}
}
return new UnaryOperation(op, expression);
} else if (Token::IsCountOp(op)) {
@ -3501,6 +3509,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
top_scope_->AddParameter(parameter);
num_parameters++;
if (num_parameters > kMaxNumFunctionParameters) {
ReportMessageAt(scanner().location(), "too_many_parameters",
Vector<const char*>::empty());
*ok = false;
return NULL;
}
done = (peek() == Token::RPAREN);
if (!done) Expect(Token::COMMA, CHECK_OK);
}
@ -3919,16 +3933,15 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
Handle<String> type,
Vector< Handle<Object> > arguments) {
int argc = arguments.length();
Handle<JSArray> array = Factory::NewJSArray(argc, TENURED);
ASSERT(array->IsJSArray() && array->HasFastElements());
Handle<FixedArray> elements = Factory::NewFixedArray(argc, TENURED);
for (int i = 0; i < argc; i++) {
Handle<Object> element = arguments[i];
if (!element.is_null()) {
// We know this doesn't cause a GC here because we allocated the JSArray
// large enough.
array->SetFastElement(i, *element)->ToObjectUnchecked();
elements->set(i, *element);
}
}
Handle<JSArray> array = Factory::NewJSArrayWithElements(elements, TENURED);
ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
args->Add(new Literal(type));
args->Add(new Literal(array));
@ -4058,6 +4071,11 @@ Handle<Object> JsonParser::ParseJsonObject() {
uint32_t index;
if (key->AsArrayIndex(&index)) {
SetOwnElement(json_object, index, value);
} else if (key->Equals(Heap::Proto_symbol())) {
// We can't remove the __proto__ accessor since it's hardcoded
// in several places. Instead go along and add the value as
// the prototype of the created object if possible.
SetPrototype(json_object, value);
} else {
SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
}
@ -4255,6 +4273,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
capture_index);
}
builder->AddAtom(body);
// For compatibility with JSC and ES3, we allow quantifiers after
// lookaheads, and break in all cases.
break;
}
case '|': {
@ -4328,7 +4348,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
type,
captures_started());
builder = stored_state->builder();
break;
continue;
}
case '[': {
RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
@ -4351,11 +4371,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
// CharacterClassEscape
//
// CharacterClassEscape :: one of
// d D s S w W
case 'd': case 'D': case 's': case 'S': case 'w': case 'W': {
uc32 c = Next();
Advance(2);

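Per the new comment, a "__proto__" key in JSON input is forwarded to SetPrototype rather than stored as an own property; a sketch of the resulting behavior under this change:

var o = JSON.parse('{"__proto__": {"answer": 42}}');
console.log(o.answer);                       // 42, inherited through the new prototype
console.log(o.hasOwnProperty('__proto__'));  // false: never became an own property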
5
deps/v8/src/parser.h

@ -436,6 +436,11 @@ class Parser {
Vector<Handle<String> > args);
protected:
// Limit on number of function parameters is chosen arbitrarily.
// Code::Flags uses only the low 17 bits of num-parameters to
// construct a hashable id, so if more than 2^17 are allowed, this
// should be checked.
static const int kMaxNumFunctionParameters = 32766;
FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
UC16CharacterStream* source,
ZoneScope* zone_scope);

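A parameter list longer than kMaxNumFunctionParameters is now rejected during parsing; a sketch that synthesizes an oversized list via the Function constructor:

var params = [];
for (var i = 0; i <= 32766; i++) params.push('p' + i);  // 32767 names, one over the limit
try {
  Function(params.join(','), 'return 0;');
} catch (e) {
  console.log(e instanceof SyntaxError);  // true: "Too many parameters in function definition"
}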
Some files were not shown because too many files changed in this diff
