
Upgrade V8 to 3.0.12

v0.7.4-release
Ryan Dahl, 14 years ago
commit 7eaa956bae
  1. deps/v8/0002-Patch-for-oprofile.patch (12 lines changed)
  2. deps/v8/AUTHORS (1 line changed)
  3. deps/v8/ChangeLog (18 lines changed)
  4. deps/v8/SConstruct (22 lines changed)
  5. deps/v8/src/SConscript (10 lines changed)
  6. deps/v8/src/arm/assembler-arm.cc (247 lines changed)
  7. deps/v8/src/arm/assembler-arm.h (168 lines changed)
  8. deps/v8/src/arm/builtins-arm.cc (6 lines changed)
  9. deps/v8/src/arm/code-stubs-arm.cc (748 lines changed)
  10. deps/v8/src/arm/code-stubs-arm.h (111 lines changed)
  11. deps/v8/src/arm/codegen-arm-inl.h (2 lines changed)
  12. deps/v8/src/arm/codegen-arm.cc (40 lines changed)
  13. deps/v8/src/arm/constants-arm.cc (10 lines changed)
  14. deps/v8/src/arm/constants-arm.h (588 lines changed)
  15. deps/v8/src/arm/cpu-arm.cc (2 lines changed)
  16. deps/v8/src/arm/deoptimizer-arm.cc (11 lines changed)
  17. deps/v8/src/arm/disasm-arm.cc (409 lines changed)
  18. deps/v8/src/arm/frames-arm.cc (11 lines changed)
  19. deps/v8/src/arm/frames-arm.h (18 lines changed)
  20. deps/v8/src/arm/full-codegen-arm.cc (75 lines changed)
  21. deps/v8/src/arm/ic-arm.cc (29 lines changed)
  22. deps/v8/src/arm/jump-target-arm.cc (8 lines changed)
  23. deps/v8/src/arm/lithium-arm.cc (29 lines changed)
  24. deps/v8/src/arm/lithium-arm.h (11 lines changed)
  25. deps/v8/src/arm/lithium-codegen-arm.cc (160 lines changed)
  26. deps/v8/src/arm/lithium-codegen-arm.h (4 lines changed)
  27. deps/v8/src/arm/macro-assembler-arm.cc (144 lines changed)
  28. deps/v8/src/arm/macro-assembler-arm.h (42 lines changed)
  29. deps/v8/src/arm/simulator-arm.cc (478 lines changed)
  30. deps/v8/src/arm/simulator-arm.h (84 lines changed)
  31. deps/v8/src/arm/stub-cache-arm.cc (74 lines changed)
  32. deps/v8/src/assembler.h (3 lines changed)
  33. deps/v8/src/ast.cc (37 lines changed)
  34. deps/v8/src/ast.h (20 lines changed)
  35. deps/v8/src/bootstrapper.cc (3 lines changed)
  36. deps/v8/src/builtins.cc (7 lines changed)
  37. deps/v8/src/builtins.h (1 line changed)
  38. deps/v8/src/code-stubs.h (16 lines changed)
  39. deps/v8/src/deoptimizer.h (19 lines changed)
  40. deps/v8/src/frames.cc (6 lines changed)
  41. deps/v8/src/heap.cc (6 lines changed)
  42. deps/v8/src/hydrogen-instructions.cc (31 lines changed)
  43. deps/v8/src/hydrogen-instructions.h (310 lines changed)
  44. deps/v8/src/hydrogen.cc (46 lines changed)
  45. deps/v8/src/hydrogen.h (2 lines changed)
  46. deps/v8/src/ia32/code-stubs-ia32.cc (111 lines changed)
  47. deps/v8/src/ia32/code-stubs-ia32.h (1 line changed)
  48. deps/v8/src/ia32/codegen-ia32.cc (12 lines changed)
  49. deps/v8/src/ia32/deoptimizer-ia32.cc (48 lines changed)
  50. deps/v8/src/ia32/full-codegen-ia32.cc (15 lines changed)
  51. deps/v8/src/ia32/ic-ia32.cc (6 lines changed)
  52. deps/v8/src/ia32/lithium-codegen-ia32.cc (49 lines changed)
  53. deps/v8/src/ia32/lithium-codegen-ia32.h (4 lines changed)
  54. deps/v8/src/ia32/lithium-gap-resolver-ia32.cc (11 lines changed)
  55. deps/v8/src/ia32/lithium-ia32.cc (7 lines changed)
  56. deps/v8/src/ia32/macro-assembler-ia32.cc (6 lines changed)
  57. deps/v8/src/ia32/stub-cache-ia32.cc (115 lines changed)
  58. deps/v8/src/ic.cc (47 lines changed)
  59. deps/v8/src/ic.h (3 lines changed)
  60. deps/v8/src/mark-compact.cc (10 lines changed)
  61. deps/v8/src/messages.js (36 lines changed)
  62. deps/v8/src/objects-inl.h (3 lines changed)
  63. deps/v8/src/parser.cc (211 lines changed)
  64. deps/v8/src/parser.h (8 lines changed)
  65. deps/v8/src/platform-freebsd.cc (2 lines changed)
  66. deps/v8/src/runtime-profiler.cc (19 lines changed)
  67. deps/v8/src/runtime.cc (49 lines changed)
  68. deps/v8/src/runtime.h (2 lines changed)
  69. deps/v8/src/safepoint-table.cc (41 lines changed)
  70. deps/v8/src/safepoint-table.h (25 lines changed)
  71. deps/v8/src/scanner-base.cc (10 lines changed)
  72. deps/v8/src/scanner-base.h (18 lines changed)
  73. deps/v8/src/scopes.cc (5 lines changed)
  74. deps/v8/src/scopes.h (11 lines changed)
  75. deps/v8/src/serialize.cc (4 lines changed)
  76. deps/v8/src/spaces.cc (3 lines changed)
  77. deps/v8/src/stub-cache.h (8 lines changed)
  78. deps/v8/src/top.cc (14 lines changed)
  79. deps/v8/src/top.h (6 lines changed)
  80. deps/v8/src/type-info.cc (4 lines changed)
  81. deps/v8/src/type-info.h (10 lines changed)
  82. deps/v8/src/v8.cc (2 lines changed)
  83. deps/v8/src/variables.cc (6 lines changed)
  84. deps/v8/src/variables.h (12 lines changed)
  85. deps/v8/src/version.cc (2 lines changed)
  86. deps/v8/src/x64/assembler-x64-inl.h (2 lines changed)
  87. deps/v8/src/x64/assembler-x64.cc (60 lines changed)
  88. deps/v8/src/x64/assembler-x64.h (23 lines changed)
  89. deps/v8/src/x64/builtins-x64.cc (28 lines changed)
  90. deps/v8/src/x64/code-stubs-x64.cc (16 lines changed)
  91. deps/v8/src/x64/codegen-x64.cc (12 lines changed)
  92. deps/v8/src/x64/deoptimizer-x64.cc (442 lines changed)
  93. deps/v8/src/x64/disasm-x64.cc (8 lines changed)
  94. deps/v8/src/x64/full-codegen-x64.cc (15 lines changed)
  95. deps/v8/src/x64/ic-x64.cc (5 lines changed)
  96. deps/v8/src/x64/lithium-codegen-x64.cc (660 lines changed)
  97. deps/v8/src/x64/lithium-codegen-x64.h (55 lines changed)
  98. deps/v8/src/x64/lithium-gap-resolver-x64.cc (320 lines changed)
  99. deps/v8/src/x64/lithium-gap-resolver-x64.h (74 lines changed)
  100. deps/v8/src/x64/lithium-x64.cc (188 lines changed)

deps/v8/0002-Patch-for-oprofile.patch (12 lines changed)

@@ -1,12 +0,0 @@
--- SConstruct 2010-12-16 11:49:26.000000000 -0800
+++ /tmp/SConstruct 2010-12-16 11:48:23.000000000 -0800
@@ -225,7 +225,8 @@
'LINKFLAGS': ['-m64'],
},
'prof:oprofile': {
- 'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
+ 'CPPDEFINES': ['ENABLE_OPROFILE_AGENT'],
+ 'LIBS': ['opagent', 'bfd']
}
},
'msvc': {

deps/v8/AUTHORS (1 line changed)

@@ -34,3 +34,4 @@ Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Subrato K De <subratokde@codeaurora.org>
Vlad Burlik <vladbph@gmail.com>
Mike Gilbert <floppymaster@gmail.com>

deps/v8/ChangeLog (18 lines changed)

@@ -1,3 +1,21 @@
2011-01-28: Version 3.0.12
Added support for strict mode parameter and object property
validation.
Fixed a couple of crash bugs.
2011-01-25: Version 3.0.11
Fixed a bug in deletion of lookup slots that could cause global
variables to be accidentally deleted (http://crbug.com/70066).
Added support for strict mode octal literal verification.
Fixed a couple of crash bugs (issues 1070 and 1071).
2011-01-24: Version 3.0.10
Fixed External::Wrap for 64-bit addresses (issue 1037).

deps/v8/SConstruct (22 lines changed)

@@ -32,7 +32,7 @@ import os
from os.path import join, dirname, abspath
from types import DictType, StringTypes
root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
sys.path.insert(0, join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
@@ -127,12 +127,16 @@ LIBRARY_FLAGS = {
},
'inspector:on': {
'CPPDEFINES': ['INSPECTOR'],
},
'liveobjectlist:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
}
},
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
@@ -325,7 +329,7 @@ V8_EXTRA_FLAGS = {
},
'msvc': {
'all': {
'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
'WARNINGFLAGS': ['/W3', '/WX', '/wd4351', '/wd4355', '/wd4800']
},
'library:shared': {
'CPPDEFINES': ['BUILDING_V8_SHARED'],
@@ -751,6 +755,11 @@ SIMPLE_OPTIONS = {
'default': 'off',
'help': 'enable inspector features'
},
'liveobjectlist': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable live object list features in the debugger'
},
'soname': {
'values': ['on', 'off'],
'default': 'off',
@@ -1008,6 +1017,13 @@ def PostprocessOptions(options, os):
# Print a warning if native regexp is specified for mips
print "Warning: forcing regexp to interpreted for mips"
options['regexp'] = 'interpreted'
if options['liveobjectlist'] == 'on':
if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
# Print a warning that liveobjectlist will implicitly enable the debugger
print "Warning: forcing debuggersupport on for liveobjectlist"
options['debuggersupport'] = 'on'
options['inspector'] = 'on'
options['objectprint'] = 'on'
def ParseEnvOverrides(arg, imports):

deps/v8/src/SConscript (10 lines changed)

@@ -95,6 +95,7 @@ SOURCES = {
mark-compact.cc
messages.cc
objects.cc
objects-printer.cc
objects-visiting.cc
oprofile-agent.cc
parser.cc
@@ -216,8 +217,9 @@ SOURCES = {
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/lithium-x64.cc
x64/lithium-codegen-x64.cc
x64/lithium-gap-resolver-x64.cc
x64/lithium-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
@@ -236,10 +238,8 @@ SOURCES = {
'os:win32': ['platform-win32.cc'],
'mode:release': [],
'mode:debug': [
'objects-debug.cc', 'objects-printer.cc', 'prettyprinter.cc',
'regexp-macro-assembler-tracer.cc'
],
'objectprint:on': ['objects-printer.cc']
'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
]
}

deps/v8/src/arm/assembler-arm.cc (247 lines changed)

@@ -213,74 +213,29 @@ MemOperand::MemOperand(Register rn, Register rm,
// -----------------------------------------------------------------------------
// Implementation of Assembler.
// Instruction encoding bits.
enum {
H = 1 << 5, // halfword (or byte)
S6 = 1 << 6, // signed (or unsigned)
L = 1 << 20, // load (or store)
S = 1 << 20, // set condition code (or leave unchanged)
W = 1 << 21, // writeback base register (or leave unchanged)
A = 1 << 21, // accumulate in multiply instruction (or not)
B = 1 << 22, // unsigned byte (or word)
N = 1 << 22, // long (or short)
U = 1 << 23, // positive (or negative) offset/index
P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
I = 1 << 25, // immediate shifter operand (or not)
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
B18 = 1 << 18,
B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
B23 = 1 << 23,
B24 = 1 << 24,
B25 = 1 << 25,
B26 = 1 << 26,
B27 = 1 << 27,
// Instruction bit masks.
RdMask = 15 << 12, // in str instruction
CondMask = 15 << 28,
CoprocessorMask = 15 << 8,
OpCodeMask = 15 << 21, // in data-processing instructions
Imm24Mask = (1 << 24) - 1,
Off12Mask = (1 << 12) - 1,
// Reserved condition.
nv = 15 << 28
};
// Specific instructions, constants, and masks.
// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
const Instr kPopInstruction =
al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
const Instr kPushRegPattern =
al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
@@ -292,33 +247,28 @@ const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kALUMask = 0x6f * B21;
const Instr kAddPattern = 0x4 * B21;
const Instr kSubPattern = 0x2 * B21;
const Instr kBicPattern = 0xe * B21;
const Instr kAndPattern = 0x0 * B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
static const Instr kLdrRegFpOffsetPattern =
const Instr kLdrRegFpOffsetPattern =
al | B26 | L | Offset | fp.code() * B16;
static const Instr kStrRegFpOffsetPattern =
const Instr kStrRegFpOffsetPattern =
al | B26 | Offset | fp.code() * B16;
static const Instr kLdrRegFpNegOffsetPattern =
const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
static const Instr kStrRegFpNegOffsetPattern =
const Instr kStrRegFpNegOffsetPattern =
al | B26 | NegOffset | fp.code() * B16;
static const Instr kLdrStrInstrTypeMask = 0xffff0000;
static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
static const Instr kLdrStrOffsetMask = 0x00000fff;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false) {
@@ -411,7 +361,7 @@ int Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign extend it and multiply it
// with 4 to get the offset in bytes.
return ((instr & Imm24Mask) << 8) >> 6;
return ((instr & kImm24Mask) << 8) >> 6;
}
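
As an aside, the ((instr & kImm24Mask) << 8) >> 6 idiom above folds the sign extension and the word-to-byte scaling into two shifts. A minimal standalone C++ sketch of the same computation (constants copied from this diff; the function name is illustrative, and like the original it assumes arithmetic right shift of signed values):

#include <cassert>
#include <cstdint>

// Shifting the 24-bit immediate up by 8 moves its sign bit to bit 31; the
// arithmetic shift right by 6 then sign-extends and leaves the value
// multiplied by 4 (instruction words to bytes).
int32_t BranchOffsetBytes(int32_t instr) {
  const uint32_t kImm24Mask = (1u << 24) - 1;
  return static_cast<int32_t>((instr & kImm24Mask) << 8) >> 6;
}

int main() {
  assert(BranchOffsetBytes(0x00000001) == 4);   // +1 word -> +4 bytes
  assert(BranchOffsetBytes(0x00ffffff) == -4);  // -1 word -> -4 bytes
  return 0;
}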
@@ -423,7 +373,7 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
int offset = instr & Off12Mask; // Zero extended offset.
int offset = instr & kOff12Mask; // Zero extended offset.
return positive ? offset : -offset;
}
@@ -436,7 +386,7 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
return (instr & ~Off12Mask) | offset;
return (instr & ~kOff12Mask) | offset;
}
@@ -453,7 +403,7 @@ Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
return (instr & ~Off12Mask) | offset;
return (instr & ~kOff12Mask) | offset;
}
@@ -467,13 +417,13 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
ASSERT(offset >= 0);
ASSERT(is_uint12(offset));
// Set the offset.
return (instr & ~Off12Mask) | offset;
return (instr & ~kOff12Mask) | offset;
}
Register Assembler::GetRd(Instr instr) {
Register reg;
reg.code_ = ((instr & kRdMask) >> kRdShift);
reg.code_ = Instruction::RdValue(instr);
return reg;
}
@@ -511,7 +461,7 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
return (instr & 0x0f7f0000) == 0x051f0000;
return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
}
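
The masked compare above replaces a bare magic number with named masks. A quick standalone check that kLdrPCMask with the condition bits stripped really is 0x0f7f0000, and that a concrete "ldr r0, [pc, #0]" (0xe59f0000) matches the pattern (values copied from this diff):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kCondMask  = 15u << 28;
  const uint32_t kLdrPCMask = kCondMask | (15u << 24) | (7u << 20) | (15u << 16);
  assert((kLdrPCMask & ~kCondMask) == 0x0f7f0000u);
  assert((0xe59f0000u & (kLdrPCMask & ~kCondMask)) == 0x051f0000u);
  return 0;
}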
@@ -532,13 +482,14 @@ const int kEndOfChain = -4;
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
if ((instr & ~Imm24Mask) == 0) {
if ((instr & ~kImm24Mask) == 0) {
// Emitted label constant, not part of a branch.
return instr - (Code::kHeaderSize - kHeapObjectTag);
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & Imm24Mask) << 8) >> 6;
if ((instr & CondMask) == nv && (instr & B24) != 0) {
int imm26 = ((instr & kImm24Mask) << 8) >> 6;
if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
((instr & B24) != 0)) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
@@ -548,7 +499,7 @@ int Assembler::target_at(int pos) {
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
if ((instr & ~Imm24Mask) == 0) {
if ((instr & ~kImm24Mask) == 0) {
ASSERT(target_pos == kEndOfChain || target_pos >= 0);
// Emitted label constant, not part of a branch.
// Make label relative to Code* of generated Code object.
@@ -557,17 +508,17 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
int imm26 = target_pos - (pos + kPcLoadDelta);
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
if ((instr & CondMask) == nv) {
if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
ASSERT((imm26 & 1) == 0);
instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
} else {
ASSERT((imm26 & 3) == 0);
instr &= ~Imm24Mask;
instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
ASSERT(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & Imm24Mask));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
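
To make the bit-24 trick concrete: blx stores bit 1 of the 26-bit byte offset in bit 24 of the instruction (the halfword bit) and the word offset in the low 24 bits. A small round-trip sketch (constants copied from this diff; the offset value is arbitrary):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t B24 = 1u << 24;
  const uint32_t kImm24Mask = (1u << 24) - 1;

  int32_t imm26 = 0x2ee;  // halfword-aligned byte offset (bit 0 clear)
  // Encode: bit 1 of the offset lands in bit 24, the word offset in imm24.
  uint32_t encoded = (((imm26 & 2) >> 1) * B24) | ((imm26 >> 2) & kImm24Mask);
  // Decode: sign-extend imm24, scale to bytes, re-insert the halfword bit.
  int32_t decoded = (static_cast<int32_t>((encoded & kImm24Mask) << 8) >> 6) |
                    (((encoded >> 24) & 1) << 1);
  assert(decoded == imm26);
  return 0;
}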
@@ -582,14 +533,14 @@ void Assembler::print(Label* L) {
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
Instr instr = instr_at(l.pos());
if ((instr & ~Imm24Mask) == 0) {
if ((instr & ~kImm24Mask) == 0) {
PrintF("value\n");
} else {
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
int cond = instr & CondMask;
Condition cond = Instruction::ConditionField(instr);
const char* b;
const char* c;
if (cond == nv) {
if (cond == kSpecialCondition) {
b = "blx";
c = "";
} else {
@@ -731,14 +682,14 @@ static bool fits_shifter(uint32_t imm32,
}
} else {
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == kAddPattern ||
alu_insn == kSubPattern) {
if (alu_insn == ADD ||
alu_insn == SUB) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == kAndPattern ||
alu_insn == kBicPattern) {
} else if (alu_insn == AND ||
alu_insn == BIC) {
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kAndBicFlip;
return true;
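
For context: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, and when an operand does not fit, fits_shifter retries with the negated immediate (flipping ADD/SUB) or the complemented one (flipping AND/BIC). A rough standalone sketch of the encodability test only (FitsRotatedImmediate is an illustrative name, not a V8 function):

#include <cstdint>
#include <cstdio>

// True if imm32 equals some 8-bit value rotated right by an even amount.
bool FitsRotatedImmediate(uint32_t imm32) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotate left by rot to undo a rotate-right-by-rot encoding.
    uint32_t undone =
        rot == 0 ? imm32 : (imm32 << rot) | (imm32 >> (32 - rot));
    if (undone <= 0xff) return true;
  }
  return false;
}

int main() {
  uint32_t imm = 0xffffff00u;
  printf("direct:  %d\n", FitsRotatedImmediate(imm));   // 0: not encodable
  printf("flipped: %d\n", FitsRotatedImmediate(~imm));  // 1: so AND #imm can
                                                        // become BIC #0xff
  return 0;
}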
@@ -782,7 +733,7 @@ void Assembler::addrmod1(Instr instr,
Register rd,
const Operand& x) {
CheckBuffer();
ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
if (!x.rm_.is_valid()) {
// Immediate.
uint32_t rotate_imm;
@@ -794,8 +745,8 @@ void Assembler::addrmod1(Instr instr,
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
if ((instr & ~CondMask) == 13*B21) { // mov, S not set
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
@@ -836,7 +787,7 @@ void Assembler::addrmod1(Instr instr,
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | B | L)) == B26);
ASSERT((instr & ~(kCondMask | B | L)) == B26);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@@ -849,8 +800,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
@@ -869,7 +819,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
ASSERT(x.rn_.is_valid());
int am = x.am_;
if (!x.rm_.is_valid()) {
@@ -883,8 +833,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
// Immediate offset cannot be encoded, load it first to register ip
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.offset_), LeaveCC,
static_cast<Condition>(instr & CondMask));
mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
}
@@ -895,7 +844,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
// rn (and rd in a load) should never be ip, or will be trashed.
ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
static_cast<Condition>(instr & CondMask));
Instruction::ConditionField(instr));
addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
return;
} else {
@@ -909,7 +858,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
ASSERT(rl != 0);
ASSERT(!rn.is(pc));
emit(instr | rn.code()*B16 | rl);
@@ -919,7 +868,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
// Unindexed addressing is not encoded by this function.
ASSERT_EQ((B27 | B26),
(instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
(instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
@@ -982,7 +931,7 @@ void Assembler::b(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
// Dead code is a good location to emit the constant pool.
@@ -996,7 +945,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
ASSERT((branch_offset & 3) == 0);
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
@@ -1006,21 +955,21 @@ void Assembler::blx(int branch_offset) { // v5 and above
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
ASSERT(is_int24(imm24));
emit(nv | B27 | B25 | h | (imm24 & Imm24Mask));
emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
void Assembler::blx(Register target, Condition cond) { // v5 and above
positions_recorder()->WriteRecordedPositions();
ASSERT(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
positions_recorder()->WriteRecordedPositions();
ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
@@ -1028,31 +977,31 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
addrmod1(cond | AND | s, src1, dst, src2);
}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 1*B21 | s, src1, dst, src2);
addrmod1(cond | EOR | s, src1, dst, src2);
}
void Assembler::sub(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 2*B21 | s, src1, dst, src2);
addrmod1(cond | SUB | s, src1, dst, src2);
}
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 3*B21 | s, src1, dst, src2);
addrmod1(cond | RSB | s, src1, dst, src2);
}
void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 4*B21 | s, src1, dst, src2);
addrmod1(cond | ADD | s, src1, dst, src2);
// Eliminate pattern: push(r), pop()
// str(src, MemOperand(sp, 4, NegPreIndex), al);
@@ -1061,7 +1010,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
(instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
@@ -1072,45 +1021,45 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
void Assembler::adc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 5*B21 | s, src1, dst, src2);
addrmod1(cond | ADC | s, src1, dst, src2);
}
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 6*B21 | s, src1, dst, src2);
addrmod1(cond | SBC | s, src1, dst, src2);
}
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 7*B21 | s, src1, dst, src2);
addrmod1(cond | RSC | s, src1, dst, src2);
}
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | 8*B21 | S, src1, r0, src2);
addrmod1(cond | TST | S, src1, r0, src2);
}
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | 9*B21 | S, src1, r0, src2);
addrmod1(cond | TEQ | S, src1, r0, src2);
}
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | 10*B21 | S, src1, r0, src2);
addrmod1(cond | CMP | S, src1, r0, src2);
}
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | 11*B21 | S, src1, r0, src2);
addrmod1(cond | CMN | S, src1, r0, src2);
}
void Assembler::orr(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 12*B21 | s, src1, dst, src2);
addrmod1(cond | ORR | s, src1, dst, src2);
}
@@ -1122,7 +1071,7 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
// the mov instruction. They must be generated using nop(int/NopMarkerTypes)
// or MarkCode(int/NopMarkerTypes) pseudo instructions.
ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
addrmod1(cond | 13*B21 | s, r0, dst, src);
addrmod1(cond | MOV | s, r0, dst, src);
}
@@ -1139,12 +1088,12 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 14*B21 | s, src1, dst, src2);
addrmod1(cond | BIC | s, src1, dst, src2);
}
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
addrmod1(cond | 15*B21 | s, r0, dst, src);
addrmod1(cond | MVN | s, r0, dst, src);
}
@@ -1222,7 +1171,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
// v5 and above.
ASSERT(!dst.is(pc) && !src.is(pc));
emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
15*B8 | B4 | src.code());
15*B8 | CLZ | src.code());
}
@@ -1376,7 +1325,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(push_instr) && IsPop(pop_instr)) {
if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
@@ -1457,8 +1406,8 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
IsPop(mem_read_instr)) {
if ((IsLdrRegFpOffset(ldr_instr) ||
IsLdrRegFpNegOffset(ldr_instr))) {
if ((mem_write_instr & kRdMask) ==
(mem_read_instr & kRdMask)) {
if (Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(mem_read_instr)) {
// Pattern: push & pop from/to same register,
// with a fp+offset ldr in between
//
@@ -1473,7 +1422,8 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
// else
// ldr rz, [fp, #-24]
if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
if (Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(ldr_instr)) {
pc_ -= 3 * kInstrSize;
} else {
pc_ -= 3 * kInstrSize;
@@ -1503,22 +1453,23 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
// ldr rz, [fp, #-24]
Register reg_pushed, reg_popped;
if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
if (Instruction::RdValue(mem_read_instr) ==
Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
} else if ((mem_write_instr & kRdMask)
!= (ldr_instr & kRdMask)) {
} else if (Instruction::RdValue(mem_write_instr) !=
Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
emit(ldr_instr);
mov(reg_popped, reg_pushed);
} else if (((mem_read_instr & kRdMask)
!= (ldr_instr & kRdMask)) ||
((mem_write_instr & kRdMask)
== (ldr_instr & kRdMask)) ) {
} else if ((Instruction::RdValue(mem_read_instr) !=
Instruction::RdValue(ldr_instr)) ||
(Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(ldr_instr))) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
@@ -1640,18 +1591,14 @@ void Assembler::stm(BlockAddrMode am,
// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
// See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
// Simulator do not share constants declaration.
ASSERT(code >= kDefaultStopCode);
static const uint32_t kStopInterruptCode = 1 << 23;
static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
// The Simulator will handle the stop instruction and get the message address.
// It expects to find the address just after the svc instruction.
BlockConstPoolFor(2);
if (code >= 0) {
svc(kStopInterruptCode + code, cond);
svc(kStopCode + code, cond);
} else {
svc(kStopInterruptCode + kMaxStopCode, cond);
svc(kStopCode + kMaxStopCode, cond);
}
emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
@@ -1673,7 +1620,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
void Assembler::bkpt(uint32_t imm16) { // v5 and above
ASSERT(is_uint16(imm16));
emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
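
The bkpt encoding splits its 16-bit immediate around the BKPT bits: the upper twelve bits go to instruction bits 19:8 and the low nibble to bits 3:0. A standalone check against the architectural encoding of bkpt 0xabcd (constants copied from this diff):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t al = 14u << 28;
  const uint32_t B24 = 1u << 24, B21 = 1u << 21, B8 = 1u << 8;
  const uint32_t BKPT = 7u << 4;
  uint32_t imm16 = 0xabcd;
  uint32_t instr = al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xf);
  assert(instr == 0xe12abc7du);
  return 0;
}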
@@ -1703,7 +1650,7 @@ void Assembler::cdp2(Coprocessor coproc,
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1726,7 +1673,7 @@ void Assembler::mcr2(Coprocessor coproc,
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1749,7 +1696,7 @@ void Assembler::mrc2(Coprocessor coproc,
CRegister crn,
CRegister crm,
int opcode_2) { // v5 and above
mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
@@ -1779,7 +1726,7 @@ void Assembler::ldc2(Coprocessor coproc,
CRegister crd,
const MemOperand& src,
LFlag l) { // v5 and above
ldc(coproc, crd, src, l, static_cast<Condition>(nv));
ldc(coproc, crd, src, l, kSpecialCondition);
}
@@ -1788,7 +1735,7 @@ void Assembler::ldc2(Coprocessor coproc,
Register rn,
int option,
LFlag l) { // v5 and above
ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -1818,7 +1765,7 @@ void Assembler::stc2(Coprocessor coproc,
CRegister crd,
const MemOperand& dst,
LFlag l) { // v5 and above
stc(coproc, crd, dst, l, static_cast<Condition>(nv));
stc(coproc, crd, dst, l, kSpecialCondition);
}
@@ -1827,7 +1774,7 @@ void Assembler::stc2(Coprocessor coproc,
Register rn,
int option,
LFlag l) { // v5 and above
stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
stc(coproc, crd, rn, option, l, kSpecialCondition);
}
@@ -2637,7 +2584,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Instruction to patch must be a ldr/str [pc, #offset].
// P and U set, B and W clear, Rn == pc, offset12 still 0.
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32

deps/v8/src/arm/assembler-arm.h (168 lines changed)

@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
namespace v8 {
@@ -300,18 +301,6 @@ const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// VFP FPSCR constants.
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
static const uint32_t kVFPCConditionFlagBit = 1 << 29;
static const uint32_t kVFPVConditionFlagBit = 1 << 28;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPExceptionMask = 0xf;
// Coprocessor register
struct CRegister {
@@ -372,149 +361,6 @@ enum Coprocessor {
};
// Condition field in instructions.
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
nz = 1 << 28, // Z clear not zero.
cs = 2 << 28, // C set carry set.
hs = 2 << 28, // C set unsigned higher or same.
cc = 3 << 28, // C clear carry clear.
lo = 3 << 28, // C clear unsigned lower.
mi = 4 << 28, // N set negative.
pl = 5 << 28, // N clear positive or zero.
vs = 6 << 28, // V set overflow.
vc = 7 << 28, // V clear no overflow.
hi = 8 << 28, // C set, Z clear unsigned higher.
ls = 9 << 28, // C clear or Z set unsigned lower or same.
ge = 10 << 28, // N == V greater or equal.
lt = 11 << 28, // N != V less than.
gt = 12 << 28, // Z clear, N == V greater than.
le = 13 << 28, // Z set or N != V less then or equal
al = 14 << 28 // always.
};
// Returns the equivalent of !cc.
inline Condition NegateCondition(Condition cc) {
ASSERT(cc != al);
return static_cast<Condition>(cc ^ ne);
}
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) {
switch (cc) {
case lo:
return hi;
case hi:
return lo;
case hs:
return ls;
case ls:
return hs;
case lt:
return gt;
case gt:
return lt;
case ge:
return le;
case le:
return ge;
default:
return cc;
};
}
// Branch hints are not used on the ARM. They are defined so that they can
// appear in shared function signatures, but will be ignored in ARM
// implementations.
enum Hint { no_hint };
// Hints are not used on the arm. Negating is trivial.
inline Hint NegateHint(Hint ignored) { return no_hint; }
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants
// Shifter operand shift operation
enum ShiftOp {
LSL = 0 << 5,
LSR = 1 << 5,
ASR = 2 << 5,
ROR = 3 << 5,
RRX = -1
};
// Condition code updating mode
enum SBit {
SetCC = 1 << 20, // set condition code
LeaveCC = 0 << 20 // leave condition code unchanged
};
// Status register selection
enum SRegister {
CPSR = 0 << 22,
SPSR = 1 << 22
};
// Status register fields
enum SRegisterField {
CPSR_c = CPSR | 1 << 16,
CPSR_x = CPSR | 1 << 17,
CPSR_s = CPSR | 1 << 18,
CPSR_f = CPSR | 1 << 19,
SPSR_c = SPSR | 1 << 16,
SPSR_x = SPSR | 1 << 17,
SPSR_s = SPSR | 1 << 18,
SPSR_f = SPSR | 1 << 19
};
// Status register field mask (or'ed SRegisterField enum values)
typedef uint32_t SRegisterFieldMask;
// Memory operand addressing mode
enum AddrMode {
// bit encoding P U W
Offset = (8|4|0) << 21, // offset (without writeback to base)
PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
};
// Load/store multiple addressing mode
enum BlockAddrMode {
// bit encoding P U W
da = (0|0|0) << 21, // decrement after
ia = (0|4|0) << 21, // increment after
db = (8|0|0) << 21, // decrement before
ib = (8|4|0) << 21, // increment before
da_w = (0|0|1) << 21, // decrement after with writeback to base
ia_w = (0|4|1) << 21, // increment after with writeback to base
db_w = (8|0|1) << 21, // decrement before with writeback to base
ib_w = (8|4|1) << 21 // increment before with writeback to base
};
// Coprocessor load/store operand size
enum LFlag {
Long = 1 << 22, // long load/store coprocessor
Short = 0 << 22 // short load/store coprocessor
};
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -658,9 +504,6 @@ class CpuFeatures : public AllStatic {
};
typedef int32_t Instr;
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -680,15 +523,11 @@ extern const Instr kMovwLeaveCCFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kALUMask;
extern const Instr kAddPattern;
extern const Instr kSubPattern;
extern const Instr kAndPattern;
extern const Instr kBicPattern;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -1001,7 +840,6 @@ class Assembler : public Malloced {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
static const int kDefaultStopCode = -1;
void stop(const char* msg,
Condition cond = al,
int32_t code = kDefaultStopCode);

deps/v8/src/arm/builtins-arm.cc (6 lines changed)

@@ -190,7 +190,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Check whether an empty sized array is requested.
__ tst(array_size, array_size);
__ b(nz, &not_empty);
__ b(ne, &not_empty);
// If an empty array is requested allocate a small elements array anyway. This
// keeps the code below free of special casing for the empty array.
@@ -566,7 +566,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// if it's a string already before calling the conversion builtin.
Label convert_argument;
__ bind(&not_cached);
__ BranchOnSmi(r0, &convert_argument);
__ JumpIfSmi(r0, &convert_argument);
// Is it a String?
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -666,7 +666,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
__ b(nz, &rt_call);
__ b(ne, &rt_call);
#endif
// Load the initial map and verify that it is in fact a map.

deps/v8/src/arm/code-stubs-arm.cc (748 lines changed)

File diff suppressed because it is too large

deps/v8/src/arm/code-stubs-arm.h (111 lines changed)

@@ -218,6 +218,117 @@ class GenericBinaryOpStub : public CodeStub {
};
class TypeRecordingBinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
TypeRecordingBinaryOpStub(
int key,
TRBinaryOpIC::TypeInfo operands_type,
TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) { }
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
Token::Value op_;
OverwriteMode mode_;
bool use_vfp3_;
// Operand type information determined at runtime.
TRBinaryOpIC::TypeInfo operands_type_;
TRBinaryOpIC::TypeInfo result_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("TypeRecordingBinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
TRBinaryOpIC::GetName(operands_type_));
}
#endif
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP3Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return TypeRecordingBinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| VFP3Bits::encode(use_vfp3_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
void Generate(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateSmiSmiOperation(MacroAssembler* masm);
void GenerateVFPOperation(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm,
Register result,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* gc_required);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return TRBinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
code->set_type_recording_binary_op_type(operands_type_);
code->set_type_recording_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
};
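
The minor key above is packed with V8's BitField helper. A stripped-down sketch of how such a field packer works and how the RRRTTTVOOOOOOOMM layout round-trips (this template is a simplified stand-in for the real one in src/utils.h):

#include <cassert>
#include <cstdint>

// Packs a value of type T into size bits starting at bit shift.
template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

// Mirrors the stub's layout: MM mode (bits 0-1), OOOOOOO op (bits 2-8),
// V vfp3 (bit 9); the two 3-bit type-info fields sit above at bits 10-15.
typedef BitField<int, 0, 2> ModeBits;
typedef BitField<int, 2, 7> OpBits;
typedef BitField<bool, 9, 1> VFP3Bits;

int main() {
  uint32_t key =
      ModeBits::encode(1) | OpBits::encode(42) | VFP3Bits::encode(true);
  assert(ModeBits::decode(key) == 1);
  assert(OpBits::decode(key) == 42);
  assert(VFP3Bits::decode(key));
  return 0;
}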
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,

deps/v8/src/arm/codegen-arm-inl.h (2 lines changed)

@@ -39,7 +39,7 @@ namespace internal {
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
#undef __

deps/v8/src/arm/codegen-arm.cc (40 lines changed)

@@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots();
frame_->AssertIsSpilled();
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -1589,7 +1589,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
}
void CodeGenerator::Comparison(Condition cc,
void CodeGenerator::Comparison(Condition cond,
Expression* left,
Expression* right,
bool strict) {
@@ -1603,7 +1603,7 @@ void CodeGenerator::Comparison(Condition cc,
// result : cc register
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq);
ASSERT(!strict || cond == eq);
Register lhs;
Register rhs;
@@ -1614,8 +1614,8 @@ void CodeGenerator::Comparison(Condition cc,
// We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) {
cc = ReverseCondition(cc);
if (cond == gt || cond == le) {
cond = ReverseCondition(cond);
lhs_is_smi = frame_->KnownSmiAt(0);
rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister();
@@ -1655,7 +1655,7 @@ void CodeGenerator::Comparison(Condition cc,
// Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack.
CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump();
@@ -1667,7 +1667,7 @@ void CodeGenerator::Comparison(Condition cc,
__ cmp(lhs, Operand(rhs));
exit.Bind();
cc_reg_ = cc;
cc_reg_ = cond;
}
@@ -1762,7 +1762,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// sp[2]: applicand.
// Check that the receiver really is a JavaScript object.
__ BranchOnSmi(receiver_reg, &build_args);
__ JumpIfSmi(receiver_reg, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@@ -1774,7 +1774,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Check that applicand.apply is Function.prototype.apply.
__ ldr(r0, MemOperand(sp, kPointerSize));
__ BranchOnSmi(r0, &build_args);
__ JumpIfSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
@@ -1785,7 +1785,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Check that applicand is a function.
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ BranchOnSmi(r1, &build_args);
__ JumpIfSmi(r1, &build_args);
__ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
@@ -1885,8 +1885,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc);
Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cond);
cc_reg_ = al;
}
@@ -4618,8 +4618,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(runtime.entry_frame() == NULL);
runtime.set_entry_frame(frame_);
__ BranchOnNotSmi(exponent, &exponent_nonsmi);
__ BranchOnNotSmi(base, &base_nonsmi);
__ JumpIfNotSmi(exponent, &exponent_nonsmi);
__ JumpIfNotSmi(base, &base_nonsmi);
heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@@ -5572,7 +5572,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
deferred->Branch(lt);
__ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(nz);
deferred->Branch(ne);
// Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@@ -5589,7 +5589,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ mov(tmp2, index1);
__ orr(tmp2, tmp2, index2);
__ tst(tmp2, Operand(kSmiTagMask));
deferred->Branch(nz);
deferred->Branch(ne);
// Check that both indices are valid.
__ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
@@ -5849,14 +5849,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
// Delete from the context holding the named variable.
frame_->EmitPush(cp);
frame_->EmitPush(Operand(variable->name()));
frame_->CallRuntime(Runtime::kLookupContext, 2);
// r0: context
frame_->EmitPush(r0);
frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->EmitPush(r0);
} else {

deps/v8/src/arm/constants-arm.cc (10 lines changed)

@@ -32,12 +32,10 @@
#include "constants-arm.h"
namespace assembler {
namespace arm {
namespace v8 {
namespace internal {
namespace v8i = v8::internal;
double Instr::DoubleImmedVmov() const {
double Instruction::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction.
//
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@@ -149,6 +147,6 @@ int Registers::Number(const char* name) {
}
} } // namespace assembler::arm
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

deps/v8/src/arm/constants-arm.h (588 lines changed)

@@ -86,8 +86,8 @@
#define USE_BLX 1
#endif
namespace assembler {
namespace arm {
namespace v8 {
namespace internal {
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
@@ -102,6 +102,9 @@ static const int kNumVFPRegisters =
static const int kPCRegister = 15;
static const int kNoRegister = -1;
// -----------------------------------------------------------------------------
// Conditions.
// Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions.
//
@@ -111,93 +114,262 @@ static const int kNoRegister = -1;
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
typedef unsigned char byte;
// Values for the condition field as defined in section A3.2
enum Condition {
no_condition = -1,
EQ = 0, // equal
NE = 1, // not equal
CS = 2, // carry set/unsigned higher or same
CC = 3, // carry clear/unsigned lower
MI = 4, // minus/negative
PL = 5, // plus/positive or zero
VS = 6, // overflow
VC = 7, // no overflow
HI = 8, // unsigned higher
LS = 9, // unsigned lower or same
GE = 10, // signed greater than or equal
LT = 11, // signed less than
GT = 12, // signed greater than
LE = 13, // signed less than or equal
AL = 14, // always (unconditional)
special_condition = 15, // special condition (refer to section A3.2.1)
max_condition = 16
kNoCondition = -1,
eq = 0 << 28, // Z set Equal.
ne = 1 << 28, // Z clear Not equal.
cs = 2 << 28, // C set Unsigned higher or same.
cc = 3 << 28, // C clear Unsigned lower.
mi = 4 << 28, // N set Negative.
pl = 5 << 28, // N clear Positive or zero.
vs = 6 << 28, // V set Overflow.
vc = 7 << 28, // V clear No overflow.
hi = 8 << 28, // C set, Z clear Unsigned higher.
ls = 9 << 28, // C clear or Z set Unsigned lower or same.
ge = 10 << 28, // N == V Greater or equal.
lt = 11 << 28, // N != V Less than.
gt = 12 << 28, // Z clear, N == V Greater than.
le = 13 << 28, // Z set or N != V Less then or equal
al = 14 << 28, // Always.
kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
kNumberOfConditions = 16,
// Aliases.
hs = cs, // C set Unsigned higher or same.
lo = cc // C clear Unsigned lower.
};
inline Condition NegateCondition(Condition cond) {
ASSERT(cond != al);
return static_cast<Condition>(cond ^ ne);
}
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
case hi:
return lo;
case hs:
return ls;
case ls:
return hs;
case lt:
return gt;
case gt:
return lt;
case ge:
return le;
case le:
return ge;
default:
return cond;
};
}
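
Because the conditions are laid out in true/false pairs, NegateCondition above is a single XOR with ne (1 << 28). A tiny standalone check (values copied from the enum above):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t eq = 0u << 28, ne = 1u << 28;
  const uint32_t ge = 10u << 28, lt = 11u << 28;
  assert((eq ^ ne) == ne);  // !eq is ne
  assert((ge ^ ne) == lt);  // !ge is lt
  return 0;
}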
// -----------------------------------------------------------------------------
// Instructions encoding.
// Instr is merely used by the Assembler to distinguish 32bit integers
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
typedef int32_t Instr;
// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4
enum Opcode {
no_operand = -1,
AND = 0, // Logical AND
EOR = 1, // Logical Exclusive OR
SUB = 2, // Subtract
RSB = 3, // Reverse Subtract
ADD = 4, // Add
ADC = 5, // Add with Carry
SBC = 6, // Subtract with Carry
RSC = 7, // Reverse Subtract with Carry
TST = 8, // Test
TEQ = 9, // Test Equivalence
CMP = 10, // Compare
CMN = 11, // Compare Negated
ORR = 12, // Logical (inclusive) OR
MOV = 13, // Move
BIC = 14, // Bit Clear
MVN = 15, // Move Not
max_operand = 16
AND = 0 << 21, // Logical AND.
EOR = 1 << 21, // Logical Exclusive OR.
SUB = 2 << 21, // Subtract.
RSB = 3 << 21, // Reverse Subtract.
ADD = 4 << 21, // Add.
ADC = 5 << 21, // Add with Carry.
SBC = 6 << 21, // Subtract with Carry.
RSC = 7 << 21, // Reverse Subtract with Carry.
TST = 8 << 21, // Test.
TEQ = 9 << 21, // Test Equivalence.
CMP = 10 << 21, // Compare.
CMN = 11 << 21, // Compare Negated.
ORR = 12 << 21, // Logical (inclusive) OR.
MOV = 13 << 21, // Move.
BIC = 14 << 21, // Bit Clear.
MVN = 15 << 21 // Move Not.
};
// The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 {
// With bits 22-21 01.
BX = 1,
BXJ = 2,
BLX = 3,
BKPT = 7,
BX = 1 << 4,
BXJ = 2 << 4,
BLX = 3 << 4,
BKPT = 7 << 4,
// With bits 22-21 11.
CLZ = 1
CLZ = 1 << 4
};
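
Combined with the encoding bits defined next, these 4-bit selectors slot into bits 7:4 of the instruction; for example the register form of blx is emitted as B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | rm. A standalone check against the architectural encoding of blx r3, 0xe12fff33 (constants copied from this diff):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t al = 14u << 28;
  const uint32_t B24 = 1u << 24, B21 = 1u << 21;
  const uint32_t B16 = 1u << 16, B12 = 1u << 12, B8 = 1u << 8;
  const uint32_t BLX = 3u << 4;
  uint32_t rm = 3;  // r3
  uint32_t instr = al | B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX | rm;
  assert(instr == 0xe12fff33u);
  return 0;
}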
// Instruction encoding bits and masks.
enum {
H = 1 << 5, // Halfword (or byte).
S6 = 1 << 6, // Signed (or unsigned).
L = 1 << 20, // Load (or store).
S = 1 << 20, // Set condition code (or leave unchanged).
W = 1 << 21, // Writeback base register (or leave unchanged).
A = 1 << 21, // Accumulate in multiply instruction (or not).
B = 1 << 22, // Unsigned byte (or word).
N = 1 << 22, // Long (or short).
U = 1 << 23, // Positive (or negative) offset/index.
P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
I = 1 << 25, // Immediate shifter operand (or not).
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
B18 = 1 << 18,
B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
B23 = 1 << 23,
B24 = 1 << 24,
B25 = 1 << 25,
B26 = 1 << 26,
B27 = 1 << 27,
B28 = 1 << 28,
// Instruction bit masks.
kCondMask = 15 << 28,
kALUMask = 0x6f << 21,
kRdMask = 15 << 12, // In str instruction.
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
kOff12Mask = (1 << 12) - 1
};
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
// Condition code updating mode.
enum SBit {
SetCC = 1 << 20, // Set condition code.
LeaveCC = 0 << 20 // Leave condition code unchanged.
};
// Status register selection.
enum SRegister {
CPSR = 0 << 22,
SPSR = 1 << 22
};
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
no_shift = -1,
LSL = 0, // Logical shift left
LSR = 1, // Logical shift right
ASR = 2, // Arithmetic shift right
ROR = 3, // Rotate right
max_shift = 4
enum ShiftOp {
LSL = 0 << 5, // Logical shift left.
LSR = 1 << 5, // Logical shift right.
ASR = 2 << 5, // Arithmetic shift right.
ROR = 3 << 5, // Rotate right.
// RRX is encoded as ROR with shift_imm == 0.
// Use a special code to make the distinction. The RRX ShiftOp is only used
// as an argument, and will never actually be encoded. The Assembler will
// detect it and emit the correct ROR shift operand with shift_imm == 0.
RRX = -1,
kNumberOfShifts = 4
};
// Status register fields.
enum SRegisterField {
CPSR_c = CPSR | 1 << 16,
CPSR_x = CPSR | 1 << 17,
CPSR_s = CPSR | 1 << 18,
CPSR_f = CPSR | 1 << 19,
SPSR_c = SPSR | 1 << 16,
SPSR_x = SPSR | 1 << 17,
SPSR_s = SPSR | 1 << 18,
SPSR_f = SPSR | 1 << 19
};
// Status register field mask (or'ed SRegisterField enum values).
typedef uint32_t SRegisterFieldMask;
// Memory operand addressing mode.
enum AddrMode {
// Bit encoding P U W.
Offset = (8|4|0) << 21, // Offset (without writeback to base).
PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
};
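
The (8|4|1) notation spells out the P, U and W bits before the common shift by 21. A standalone sanity check decomposing two of the modes (constants copied from this diff):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t P = 1u << 24, U = 1u << 23, W = 1u << 21;
  const uint32_t Offset      = (8u | 4u | 0u) << 21;  // P and U set, no writeback
  const uint32_t NegPreIndex = (8u | 0u | 1u) << 21;  // P and W set, negative
  assert(Offset == (P | U));
  assert(NegPreIndex == (P | W));
  return 0;
}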
// Load/store multiple addressing mode.
enum BlockAddrMode {
// Bit encoding P U W .
da = (0|0|0) << 21, // Decrement after.
ia = (0|4|0) << 21, // Increment after.
db = (8|0|0) << 21, // Decrement before.
ib = (8|4|0) << 21, // Increment before.
da_w = (0|0|1) << 21, // Decrement after with writeback to base.
ia_w = (0|4|1) << 21, // Increment after with writeback to base.
db_w = (8|0|1) << 21, // Decrement before with writeback to base.
ib_w = (8|4|1) << 21, // Increment before with writeback to base.
// Alias modes for comparison when writeback does not matter.
da_x = (0|0|0) << 21, // Decrement after.
ia_x = (0|4|0) << 21, // Increment after.
db_x = (8|0|0) << 21, // Decrement before.
ib_x = (8|4|0) << 21 // Increment before.
};
// Coprocessor load/store operand size.
enum LFlag {
Long = 1 << 22, // Long load/store coprocessor.
Short = 0 << 22 // Short load/store coprocessor.
};
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
// Special Software Interrupt codes when used in the presence of the ARM
// simulator.
// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
// standard SoftwareInterruptCode values. Bit 23 is reserved for the stop
// feature.
enum SoftwareInterruptCodes {
// transition to C code
call_rt_redirected = 0x10,
kCallRtRedirected = 0x10,
// break point
break_point = 0x20,
kBreakpoint = 0x20,
// stop
stop = 1 << 23
kStopCode = 1 << 23
};
static const int32_t kStopCodeMask = stop - 1;
static const uint32_t kMaxStopCode = stop - 1;
static const uint32_t kStopCodeMask = kStopCode - 1;
static const uint32_t kMaxStopCode = kStopCode - 1;
static const int32_t kDefaultStopCode = -1;
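Since bit 23 marks a stop, the user-visible stop code lives in the low 23 bits and is recovered with kStopCodeMask. A small round-trip sketch using the constants above (the stop code 42 is an arbitrary example):

#include <cassert>
#include <stdint.h>

int main() {
  const uint32_t kStopCode = 1 << 23;
  const uint32_t kStopCodeMask = kStopCode - 1;  // low 23 bits
  const uint32_t svc_imm = kStopCode | 42;       // encode stop #42
  assert(svc_imm >= kStopCode);                  // what IsStop() tests
  assert((svc_imm & kStopCodeMask) == 42);       // decode the code back
  return 0;
}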
// Type of VFP register. Determines register encoding.
@ -206,6 +378,20 @@ enum VFPRegPrecision {
kDoublePrecision = 1
};
// VFP FPSCR constants.
static const uint32_t kVFPExceptionMask = 0xf;
static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
static const uint32_t kVFPCConditionFlagBit = 1 << 29;
static const uint32_t kVFPVConditionFlagBit = 1 << 28;
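The rounding mode occupies FPSCR bits 23:22, so selecting a mode is a mask-and-or over those two bits. A sketch with the constants above (the starting fpscr value is an arbitrary stand-in):

#include <cassert>
#include <stdint.h>

int main() {
  const uint32_t kVFPRoundingModeMask = 3 << 22;
  const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
  uint32_t fpscr = 0x03C00010;  // arbitrary prior FPSCR contents
  // Clear the old mode, then or in the new one; other bits are untouched.
  fpscr = (fpscr & ~kVFPRoundingModeMask) | kVFPRoundToMinusInfinityBits;
  assert((fpscr & kVFPRoundingModeMask) == kVFPRoundToMinusInfinityBits);
  return 0;
}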
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
enum FPSCRRoundingModes {
RN, // Round to Nearest.
@ -214,22 +400,91 @@ enum FPSCRRoundingModes {
RZ // Round towards zero.
};
typedef int32_t instr_t;
// -----------------------------------------------------------------------------
// Hints.
// Branch hints are not used on the ARM. They are defined so that they can
// appear in shared function signatures, but will be ignored in ARM
// implementations.
enum Hint { no_hint };
// Hints are not used on the ARM. Negating is trivial.
inline Hint NegateHint(Hint ignored) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These constants are declared in assembler-arm.cc, as they use named registers
// and other constants.
// The class Instr enables access to individual fields defined in the ARM
// add(sp, sp, 4) instruction (aka Pop())
extern const Instr kPopInstruction;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
extern const Instr kPushRegPattern;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
extern const Instr kPopRegPattern;
// mov lr, pc
extern const Instr kMovLrPc;
// ldr rd, [pc, #offset]
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
// blxcc rm
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
extern const Instr kMovLeaveCCMask;
extern const Instr kMovLeaveCCPattern;
extern const Instr kMovwMask;
extern const Instr kMovwPattern;
extern const Instr kMovwLeaveCCFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
// A mask for the Rd register for push, pop, ldr, str instructions.
extern const Instr kLdrRegFpOffsetPattern;
extern const Instr kStrRegFpOffsetPattern;
extern const Instr kLdrRegFpNegOffsetPattern;
extern const Instr kStrRegFpNegOffsetPattern;
extern const Instr kLdrStrInstrTypeMask;
extern const Instr kLdrStrInstrArgumentMask;
extern const Instr kLdrStrOffsetMask;
// -----------------------------------------------------------------------------
// Instruction abstraction.
// The class Instruction enables access to individual fields defined in the ARM
// architecture instruction set encoding as described in figure A3-1.
// Note that the Assembler uses typedef int32_t Instr.
//
// Example: Test whether the instruction at ptr sets the condition code
// bits.
//
// bool InstructionSetsConditionCodes(byte* ptr) {
// Instr* instr = Instr::At(ptr);
// int type = instr->TypeField();
// Instruction* instr = Instruction::At(ptr);
// int type = instr->TypeValue();
// return ((type == 0) || (type == 1)) && instr->HasS();
// }
//
class Instr {
class Instruction {
public:
enum {
kInstrSize = 4,
@ -237,14 +492,24 @@ class Instr {
kPCReadOffset = 8
};
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
static inline return_type Name(Instr instr) { \
char* temp = reinterpret_cast<char*>(&instr); \
return reinterpret_cast<Instruction*>(temp)->Name(); \
}
#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
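// For illustration, DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue)
// expands to roughly:
//   static inline Condition ConditionValue(Instr instr) {
//     char* temp = reinterpret_cast<char*>(&instr);
//     return reinterpret_cast<Instruction*>(temp)->ConditionValue();
//   }
// i.e. the raw Instr word is reinterpreted in place as an Instruction so the
// member accessor can be reused without an Instruction object ever being
// allocated.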
// Get the raw instruction bits.
inline instr_t InstructionBits() const {
return *reinterpret_cast<const instr_t*>(this);
inline Instr InstructionBits() const {
return *reinterpret_cast<const Instr*>(this);
}
// Set the raw instruction bits to value.
inline void SetInstructionBits(instr_t value) {
*reinterpret_cast<instr_t*>(this) = value;
inline void SetInstructionBits(Instr value) {
*reinterpret_cast<Instr*>(this) = value;
}
// Read one particular bit out of the instruction bits.
@ -252,93 +517,141 @@ class Instr {
return (InstructionBits() >> nr) & 1;
}
// Read a bit field out of the instruction bits.
// Read a bit field's value out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
}
// Read a bit field out of the instruction bits.
inline int BitField(int hi, int lo) const {
return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
}
// Static support.
// Read one particular bit out of the instruction bits.
static inline int Bit(Instr instr, int nr) {
return (instr >> nr) & 1;
}
// Read the value of a bit field out of the instruction bits.
static inline int Bits(Instr instr, int hi, int lo) {
return (instr >> lo) & ((2 << (hi - lo)) - 1);
}
// Read a bit field out of the instruction bits.
static inline int BitField(Instr instr, int hi, int lo) {
return instr & (((2 << (hi - lo)) - 1) << lo);
}
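// Worked example of the mask arithmetic: (2 << (hi - lo)) - 1 produces
// (hi - lo + 1) one-bits, so Bits(instr, 27, 25) is (instr >> 25) & 7, the
// 3-bit instruction type, while BitField(instr, 27, 25) keeps those same
// bits at their original positions (mask 0x0E000000).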
// Accessors for the different named fields used in the ARM encoding.
// The naming of these accessors corresponds to figure A3-1.
//
// Two kinds of accessors are declared:
// - <Name>Field() will return the raw field, i.e. the field's bits at their
// original place in the instruction encoding.
// e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002,
// ConditionField(instr) will return 0xC0000000.
// - <Name>Value() will return the field value, shifted back to bit 0.
// e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002,
// ConditionValue(instr) will return 0xC.
// Generally applicable fields
inline Condition ConditionField() const {
inline Condition ConditionValue() const {
return static_cast<Condition>(Bits(31, 28));
}
inline int TypeField() const { return Bits(27, 25); }
inline Condition ConditionField() const {
return static_cast<Condition>(BitField(31, 28));
}
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int TypeValue() const { return Bits(27, 25); }
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
inline int RnValue() const { return Bits(19, 16); }
inline int RdValue() const { return Bits(15, 12); }
DECLARE_STATIC_ACCESSOR(RdValue);
inline int CoprocessorField() const { return Bits(11, 8); }
inline int CoprocessorValue() const { return Bits(11, 8); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); }
inline int VmField() const { return Bits(3, 0); }
inline int VdField() const { return Bits(15, 12); }
inline int NField() const { return Bit(7); }
inline int MField() const { return Bit(5); }
inline int DField() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); }
inline int PField() const { return Bit(24); }
inline int UField() const { return Bit(23); }
inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); }
inline int Opc2Field() const { return Bits(19, 16); }
inline int Opc3Field() const { return Bits(7, 6); }
inline int SzField() const { return Bit(8); }
inline int VLField() const { return Bit(20); }
inline int VCField() const { return Bit(8); }
inline int VAField() const { return Bits(23, 21); }
inline int VBField() const { return Bits(6, 5); }
inline int VFPNRegCode(VFPRegPrecision pre) {
return VFPGlueRegCode(pre, 16, 7);
inline int VnValue() const { return Bits(19, 16); }
inline int VmValue() const { return Bits(3, 0); }
inline int VdValue() const { return Bits(15, 12); }
inline int NValue() const { return Bit(7); }
inline int MValue() const { return Bit(5); }
inline int DValue() const { return Bit(22); }
inline int RtValue() const { return Bits(15, 12); }
inline int PValue() const { return Bit(24); }
inline int UValue() const { return Bit(23); }
inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
inline int Opc2Value() const { return Bits(19, 16); }
inline int Opc3Value() const { return Bits(7, 6); }
inline int SzValue() const { return Bit(8); }
inline int VLValue() const { return Bit(20); }
inline int VCValue() const { return Bit(8); }
inline int VAValue() const { return Bits(23, 21); }
inline int VBValue() const { return Bits(6, 5); }
inline int VFPNRegValue(VFPRegPrecision pre) {
return VFPGlueRegValue(pre, 16, 7);
}
inline int VFPMRegCode(VFPRegPrecision pre) {
return VFPGlueRegCode(pre, 0, 5);
inline int VFPMRegValue(VFPRegPrecision pre) {
return VFPGlueRegValue(pre, 0, 5);
}
inline int VFPDRegCode(VFPRegPrecision pre) {
return VFPGlueRegCode(pre, 12, 22);
inline int VFPDRegValue(VFPRegPrecision pre) {
return VFPGlueRegValue(pre, 12, 22);
}
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
inline int OpcodeValue() const {
return static_cast<Opcode>(Bits(24, 21));
}
inline int SField() const { return Bit(20); }
inline Opcode OpcodeField() const {
return static_cast<Opcode>(BitField(24, 21));
}
inline int SValue() const { return Bit(20); }
// with register
inline int RmField() const { return Bits(3, 0); }
inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
inline int RegShiftField() const { return Bit(4); }
inline int RsField() const { return Bits(11, 8); }
inline int ShiftAmountField() const { return Bits(11, 7); }
inline int RmValue() const { return Bits(3, 0); }
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline ShiftOp ShiftField() const {
return static_cast<ShiftOp>(BitField(6, 5));
}
inline int RegShiftValue() const { return Bit(4); }
inline int RsValue() const { return Bits(11, 8); }
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate
inline int RotateField() const { return Bits(11, 8); }
inline int Immed8Field() const { return Bits(7, 0); }
inline int Immed4Field() const { return Bits(19, 16); }
inline int ImmedMovwMovtField() const {
return Immed4Field() << 12 | Offset12Field(); }
inline int RotateValue() const { return Bits(11, 8); }
inline int Immed8Value() const { return Bits(7, 0); }
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
// Fields used in Load/Store instructions
inline int PUField() const { return Bits(24, 23); }
inline int BField() const { return Bit(22); }
inline int WField() const { return Bit(21); }
inline int LField() const { return Bit(20); }
inline int PUValue() const { return Bits(24, 23); }
inline int PUField() const { return BitField(24, 23); }
inline int BValue() const { return Bit(22); }
inline int WValue() const { return Bit(21); }
inline int LValue() const { return Bit(20); }
// with register uses same fields as Data processing instructions above
// with immediate
inline int Offset12Field() const { return Bits(11, 0); }
inline int Offset12Value() const { return Bits(11, 0); }
// multiple
inline int RlistField() const { return Bits(15, 0); }
inline int RlistValue() const { return Bits(15, 0); }
// extra loads and stores
inline int SignField() const { return Bit(6); }
inline int HField() const { return Bit(5); }
inline int ImmedHField() const { return Bits(11, 8); }
inline int ImmedLField() const { return Bits(3, 0); }
inline int SignValue() const { return Bit(6); }
inline int HValue() const { return Bit(5); }
inline int ImmedHValue() const { return Bits(11, 8); }
inline int ImmedLValue() const { return Bits(3, 0); }
// Fields used in Branch instructions
inline int LinkField() const { return Bit(24); }
inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
inline int LinkValue() const { return Bit(24); }
inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
// Fields used in Software interrupt instructions
inline SoftwareInterruptCodes SvcField() const {
inline SoftwareInterruptCodes SvcValue() const {
return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
}
@ -354,42 +667,45 @@ class Instr {
// Test for a stop instruction.
inline bool IsStop() const {
return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop);
return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
}
// Special accessors that test for existence of a value.
inline bool HasS() const { return SField() == 1; }
inline bool HasB() const { return BField() == 1; }
inline bool HasW() const { return WField() == 1; }
inline bool HasL() const { return LField() == 1; }
inline bool HasU() const { return UField() == 1; }
inline bool HasSign() const { return SignField() == 1; }
inline bool HasH() const { return HField() == 1; }
inline bool HasLink() const { return LinkField() == 1; }
inline bool HasS() const { return SValue() == 1; }
inline bool HasB() const { return BValue() == 1; }
inline bool HasW() const { return WValue() == 1; }
inline bool HasL() const { return LValue() == 1; }
inline bool HasU() const { return UValue() == 1; }
inline bool HasSign() const { return SignValue() == 1; }
inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkValue() == 1; }
// Decoding the double immediate in the vmov instruction.
double DoubleImmedVmov() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instr.
// Use the At(pc) function to create references to Instr.
static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
// to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instruction.
static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
private:
// Join split register codes, depending on single or double precision.
// four_bit is the position of the least-significant bit of the four
// bit specifier. one_bit is the position of the additional single bit
// specifier.
inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
if (pre == kSinglePrecision) {
return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
}
return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
}
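// For illustration: a single-precision register code is five bits split
// across the instruction, so VFPDRegValue(kSinglePrecision, ...) yields
// (Bits(15, 12) << 1) | Bit(22), whereas for kDoublePrecision the same
// call yields (Bit(22) << 4) | Bits(15, 12), the extra bit becoming the
// high bit of the register number.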
// We need to prevent the creation of instances of class Instr.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
// We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
@ -428,6 +744,6 @@ class VFPRegisters {
};
} } // namespace assembler::arm
} } // namespace v8::internal
#endif // V8_ARM_CONSTANTS_ARM_H_

2
deps/v8/src/arm/cpu-arm.cc

@ -56,7 +56,7 @@ void CPU::FlushICache(void* start, size_t size) {
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
assembler::arm::Simulator::FlushICache(start, size);
Simulator::FlushICache(start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,

11
deps/v8/src/arm/deoptimizer-arm.cc

@ -112,13 +112,16 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
@ -367,7 +370,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kIntSize) + FrameDescription::registers_offset();
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
@ -456,7 +459,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kIntSize) + FrameDescription::registers_offset();
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}

409
deps/v8/src/arm/disasm-arm.cc

@ -64,10 +64,8 @@
#include "platform.h"
namespace assembler {
namespace arm {
namespace v8i = v8::internal;
namespace v8 {
namespace internal {
//------------------------------------------------------------------------------
@ -78,7 +76,7 @@ namespace v8i = v8::internal;
class Decoder {
public:
Decoder(const disasm::NameConverter& converter,
v8::internal::Vector<char> out_buffer)
Vector<char> out_buffer)
: converter_(converter),
out_buffer_(out_buffer),
out_buffer_pos_(0) {
@ -100,45 +98,45 @@ class Decoder {
void PrintRegister(int reg);
void PrintSRegister(int reg);
void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format);
void PrintMovwMovt(Instr* instr);
int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
void PrintShiftSat(Instr* instr);
void PrintPU(Instr* instr);
int FormatVFPRegister(Instruction* instr, const char* format);
void PrintMovwMovt(Instruction* instr);
int FormatVFPinstruction(Instruction* instr, const char* format);
void PrintCondition(Instruction* instr);
void PrintShiftRm(Instruction* instr);
void PrintShiftImm(Instruction* instr);
void PrintShiftSat(Instruction* instr);
void PrintPU(Instruction* instr);
void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
// Handle formatting of instructions and their options.
int FormatRegister(Instr* instr, const char* option);
int FormatOption(Instr* instr, const char* option);
void Format(Instr* instr, const char* format);
void Unknown(Instr* instr);
int FormatRegister(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
// Each of these functions decodes one particular instruction type, a 3-bit
// field in the instruction encoding.
// Types 0 and 1 are combined as they are largely the same except for the way
// they interpret the shifter operand.
void DecodeType01(Instr* instr);
void DecodeType2(Instr* instr);
void DecodeType3(Instr* instr);
void DecodeType4(Instr* instr);
void DecodeType5(Instr* instr);
void DecodeType6(Instr* instr);
void DecodeType01(Instruction* instr);
void DecodeType2(Instruction* instr);
void DecodeType3(Instruction* instr);
void DecodeType4(Instruction* instr);
void DecodeType5(Instruction* instr);
void DecodeType6(Instruction* instr);
// Type 7 includes special Debugger instructions.
int DecodeType7(Instr* instr);
int DecodeType7(Instruction* instr);
// For VFP support.
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
void DecodeVCMP(Instr* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
Vector<char> out_buffer_;
int out_buffer_pos_;
DISALLOW_COPY_AND_ASSIGN(Decoder);
@ -169,15 +167,15 @@ void Decoder::Print(const char* str) {
// These condition names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
static const char* cond_names[max_condition] = {
static const char* cond_names[kNumberOfConditions] = {
"eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
"hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
};
// Print the condition guarding the instruction.
void Decoder::PrintCondition(Instr* instr) {
Print(cond_names[instr->ConditionField()]);
void Decoder::PrintCondition(Instruction* instr) {
Print(cond_names[instr->ConditionValue()]);
}
@ -188,36 +186,37 @@ void Decoder::PrintRegister(int reg) {
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg, false));
Print(VFPRegisters::Name(reg, false));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg, true));
Print(VFPRegisters::Name(reg, true));
}
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
static const char* shift_names[max_shift] = {
static const char* shift_names[kNumberOfShifts] = {
"lsl", "lsr", "asr", "ror"
};
// Print the register shift operands for the instruction. Generally used for
// data processing instructions.
void Decoder::PrintShiftRm(Instr* instr) {
Shift shift = instr->ShiftField();
int shift_amount = instr->ShiftAmountField();
int rm = instr->RmField();
void Decoder::PrintShiftRm(Instruction* instr) {
ShiftOp shift = instr->ShiftField();
int shift_index = instr->ShiftValue();
int shift_amount = instr->ShiftAmountValue();
int rm = instr->RmValue();
PrintRegister(rm);
if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
// Special case for using rm only.
return;
}
if (instr->RegShiftField() == 0) {
if (instr->RegShiftValue() == 0) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
Print(", RRX");
@ -225,14 +224,15 @@ void Decoder::PrintShiftRm(Instr* instr) {
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d",
shift_names[shift], shift_amount);
shift_names[shift_index],
shift_amount);
} else {
// by register
int rs = instr->RsField();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s ", shift_names[shift]);
int rs = instr->RsValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s ", shift_names[shift_index]);
PrintRegister(rs);
}
}
@ -240,20 +240,20 @@ void Decoder::PrintShiftRm(Instr* instr) {
// Print the immediate operand for the instruction. Generally used for data
// processing instructions.
void Decoder::PrintShiftImm(Instr* instr) {
int rotate = instr->RotateField() * 2;
int immed8 = instr->Immed8Field();
void Decoder::PrintShiftImm(Instruction* instr) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d", imm);
}
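The printed value is the 8-bit immediate rotated right by twice the 4-bit rotate field, exactly as in the expression above. A standalone sketch of the decode (a rotate == 0 guard is added here to keep the shift amount well-defined):

#include <cassert>
#include <stdint.h>

// ARM operand-2 immediate: immed8 rotated right by 2 * rotate_field.
static uint32_t DecodeImmediate(uint32_t rotate_field, uint32_t immed8) {
  const uint32_t rotate = rotate_field * 2;
  if (rotate == 0) return immed8;  // shifting by 32 would be undefined
  return (immed8 >> rotate) | (immed8 << (32 - rotate));
}

int main() {
  assert(DecodeImmediate(0, 0xFF) == 0xFFu);
  assert(DecodeImmediate(15, 0xFF) == 0x3FCu);  // ror(0xFF, 30) == 0xFF << 2
  return 0;
}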
// Print the optional shift and immediate used by saturating instructions.
void Decoder::PrintShiftSat(Instr* instr) {
void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d",
shift_names[instr->Bit(6) * 2],
instr->Bits(11, 7));
@ -262,21 +262,21 @@ void Decoder::PrintShiftSat(Instr* instr) {
// Print PU formatting to reduce complexity of FormatOption.
void Decoder::PrintPU(Instr* instr) {
void Decoder::PrintPU(Instruction* instr) {
switch (instr->PUField()) {
case 0: {
case da_x: {
Print("da");
break;
}
case 1: {
case ia_x: {
Print("ia");
break;
}
case 2: {
case db_x: {
Print("db");
break;
}
case 3: {
case ib_x: {
Print("ib");
break;
}
@ -292,20 +292,20 @@ void Decoder::PrintPU(Instr* instr) {
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
switch (svc) {
case call_rt_redirected:
Print("call_rt_redirected");
case kCallRtRedirected:
Print("call rt redirected");
return;
case break_point:
Print("break_point");
case kBreakpoint:
Print("breakpoint");
return;
default:
if (svc >= stop) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
if (svc >= kStopCode) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d - 0x%x",
svc & kStopCodeMask,
svc & kStopCodeMask);
} else {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
svc);
}
@ -316,32 +316,32 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instr* instr, const char* format) {
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
if (format[1] == 'n') { // 'rn: Rn register
int reg = instr->RnField();
int reg = instr->RnValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'rd: Rd register
int reg = instr->RdField();
int reg = instr->RdValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 's') { // 'rs: Rs register
int reg = instr->RsField();
int reg = instr->RsValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'm') { // 'rm: Rm register
int reg = instr->RmField();
int reg = instr->RmValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: Rt register
int reg = instr->RtField();
int reg = instr->RtValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
int rlist = instr->RlistField();
int rlist = instr->RlistValue();
int reg = 0;
Print("{");
// Print register list in ascending order, by scanning the bit mask.
@ -365,22 +365,22 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
if (format[1] == 'n') {
int reg = instr->VnField();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
int reg = instr->VnValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'm') {
int reg = instr->VmField();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
int reg = instr->VmValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'd') {
int reg = instr->VdField();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
int reg = instr->VdValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
}
@ -390,18 +390,18 @@ int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
}
int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
Print(format);
return 0;
}
// Print the movw or movt instruction.
void Decoder::PrintMovwMovt(Instr* instr) {
int imm = instr->ImmedMovwMovtField();
int rd = instr->RdField();
void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
int rd = instr->RdValue();
PrintRegister(rd);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", #%d", imm);
}
@ -411,7 +411,7 @@ void Decoder::PrintMovwMovt(Instr* instr) {
// character of the option string (the option escape has already been
// consumed by the caller.) FormatOption returns the number of
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instr* instr, const char* format) {
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
case 'a': { // 'a: accumulate multiplies
if (instr->Bit(21) == 0) {
@ -434,7 +434,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%g", d);
return 1;
}
@ -448,7 +448,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT(width > 0);
}
ASSERT((width + lsbit) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d, #%d", lsbit, width);
return 1;
}
@ -469,7 +469,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT((lsb >= 0) && (lsb <= 31));
ASSERT((width + lsb) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
@ -505,7 +505,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%s", converter_.NameInCode(str));
return 3;
}
@ -513,13 +513,13 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off12"));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Field());
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
(instr->Bits(19, 8) << 4) +
instr->Bits(3, 0));
@ -527,8 +527,8 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", offs8);
return 4;
}
@ -544,10 +544,10 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
ASSERT(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeField() == 0) {
if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
ASSERT(instr->TypeField() == 1);
ASSERT(instr->TypeValue() == 1);
PrintShiftImm(instr);
}
return 8;
@ -562,7 +562,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
} else if (format[1] == 'v') { // 'svc
ASSERT(STRING_STARTS_WITH(format, "svc"));
PrintSoftwareInterrupt(instr->SvcField());
PrintSoftwareInterrupt(instr->SvcValue());
return 3;
} else if (format[1] == 'i') { // 'sign: signed extra loads and stores
ASSERT(STRING_STARTS_WITH(format, "sign"));
@ -579,12 +579,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
case 't': { // 'target: target of branch instructions
ASSERT(STRING_STARTS_WITH(format, "target"));
int off = (instr->SImmed24Field() << 2) + 8;
out_buffer_pos_ += v8i::OS::SNPrintF(
out_buffer_ + out_buffer_pos_,
int off = (instr->SImmed24Value() << 2) + 8;
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%+d -> %s",
off,
converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
converter_.NameOfAddress(
reinterpret_cast<byte*>(instr) + off));
return 6;
}
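// The +8 in the target computation is the ARM PC read offset: an executing
// instruction reads PC as its own address plus 8 (kPCReadOffset in
// constants-arm.h), so a branch with signed 24-bit word offset n lands at
// pc + 8 + 4 * n.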
case 'u': { // 'u: signed or unsigned multiplies
@ -633,7 +633,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
void Decoder::Format(Instr* instr, const char* format) {
void Decoder::Format(Instruction* instr, const char* format) {
char cur = *format++;
while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
if (cur == '\'') { // Single quote is used as the formatting escape.
@ -649,13 +649,13 @@ void Decoder::Format(Instr* instr, const char* format) {
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" for the instruction bits.
void Decoder::Unknown(Instr* instr) {
void Decoder::Unknown(Instruction* instr) {
Format(instr, "unknown");
}
void Decoder::DecodeType01(Instr* instr) {
int type = instr->TypeField();
void Decoder::DecodeType01(Instruction* instr) {
int type = instr->TypeValue();
if ((type == 0) && instr->IsSpecialType0()) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
@ -689,7 +689,7 @@ void Decoder::DecodeType01(Instr* instr) {
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
// ldrd, strd
switch (instr->PUField()) {
case 0: {
case da_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
} else {
@ -697,7 +697,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
case 1: {
case ia_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
} else {
@ -705,7 +705,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
case 2: {
case db_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
} else {
@ -713,7 +713,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
case 3: {
case ib_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
} else {
@ -730,7 +730,7 @@ void Decoder::DecodeType01(Instr* instr) {
} else {
// extra load/store instructions
switch (instr->PUField()) {
case 0: {
case da_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
} else {
@ -738,7 +738,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
case 1: {
case ia_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
} else {
@ -746,7 +746,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
case 2: {
case db_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
} else {
@ -754,7 +754,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
break;
}
case 3: {
case ib_x: {
if (instr->Bit(22) == 0) {
Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
} else {
@ -772,7 +772,7 @@ void Decoder::DecodeType01(Instr* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
switch (instr->Bits(7, 4)) {
switch (instr->BitField(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
break;
@ -787,7 +787,7 @@ void Decoder::DecodeType01(Instr* instr) {
break;
}
} else if (instr->Bits(22, 21) == 3) {
switch (instr->Bits(7, 4)) {
switch (instr->BitField(7, 4)) {
case CLZ:
Format(instr, "clz'cond 'rd, 'rm");
break;
@ -894,27 +894,27 @@ void Decoder::DecodeType01(Instr* instr) {
}
void Decoder::DecodeType2(Instr* instr) {
void Decoder::DecodeType2(Instruction* instr) {
switch (instr->PUField()) {
case 0: {
case da_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
}
Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
break;
}
case 1: {
case ia_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
}
Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
break;
}
case 2: {
case db_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
break;
}
case 3: {
case ib_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
break;
}
@ -927,14 +927,14 @@ void Decoder::DecodeType2(Instr* instr) {
}
void Decoder::DecodeType3(Instr* instr) {
void Decoder::DecodeType3(Instruction* instr) {
switch (instr->PUField()) {
case 0: {
case da_x: {
ASSERT(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
break;
}
case 1: {
case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
@ -947,11 +947,11 @@ void Decoder::DecodeType3(Instr* instr) {
}
break;
}
case 2: {
case db_x: {
Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
break;
}
case 3: {
case ib_x: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@ -969,7 +969,7 @@ void Decoder::DecodeType3(Instr* instr) {
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
if (msbit >= lsbit) {
if (instr->RmField() == 15) {
if (instr->RmValue() == 15) {
Format(instr, "bfc'cond 'rd, 'f");
} else {
Format(instr, "bfi'cond 'rd, 'rm, 'f");
@ -991,7 +991,7 @@ void Decoder::DecodeType3(Instr* instr) {
}
void Decoder::DecodeType4(Instr* instr) {
void Decoder::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
if (instr->HasL()) {
Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@ -1001,41 +1001,43 @@ void Decoder::DecodeType4(Instr* instr) {
}
void Decoder::DecodeType5(Instr* instr) {
void Decoder::DecodeType5(Instruction* instr) {
Format(instr, "b'l'cond 'target");
}
void Decoder::DecodeType6(Instr* instr) {
void Decoder::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
int Decoder::DecodeType7(Instr* instr) {
int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
if (instr->SvcField() >= stop) {
if (instr->SvcValue() >= kStopCode) {
Format(instr, "stop'cond 'svc");
// Also print the stop message. Its address is encoded
// in the following 4 bytes.
out_buffer_pos_ +=
v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"\n %p %08x stop message: %s",
reinterpret_cast<int32_t*>(instr + Instr::kInstrSize),
*reinterpret_cast<char**>(instr + Instr::kInstrSize),
*reinterpret_cast<char**>(instr + Instr::kInstrSize));
// We have decoded 2 * Instr::kInstrSize bytes.
return 2 * Instr::kInstrSize;
reinterpret_cast<int32_t*>(instr
+ Instruction::kInstrSize),
*reinterpret_cast<char**>(instr
+ Instruction::kInstrSize),
*reinterpret_cast<char**>(instr
+ Instruction::kInstrSize));
// We have decoded 2 * Instruction::kInstrSize bytes.
return 2 * Instruction::kInstrSize;
} else {
Format(instr, "svc'cond 'svc");
}
} else {
DecodeTypeVFP(instr);
}
return Instr::kInstrSize;
return Instruction::kInstrSize;
}
// void Decoder::DecodeTypeVFP(Instr* instr)
// void Decoder::DecodeTypeVFP(Instruction* instr)
// vmov: Sn = Rt
// vmov: Rt = Sn
// vcvt: Dd = Sm
@ -1048,34 +1050,34 @@ int Decoder::DecodeType7(Instr* instr) {
// vmrs
// vmsr
// Dd = vsqrt(Dm)
void Decoder::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
void Decoder::DecodeTypeVFP(Instruction* instr) {
ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
if (instr->Opc1Field() == 0x7) {
if (instr->Opc1Value() == 0x7) {
// Other data processing instructions
if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
if (instr->SzField() == 0x1) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'Dm");
} else {
Format(instr, "vmov.f32'cond 'Sd, 'Sm");
}
} else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
} else if (((instr->Opc2Field() >> 1) == 0x6) &&
(instr->Opc3Field() & 0x1)) {
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
} else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1)) {
} else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
} else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
} else if (instr->Opc3Field() == 0x0) {
if (instr->SzField() == 0x1) {
} else if (instr->Opc3Value() == 0x0) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmov.f64'cond 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
@ -1083,9 +1085,9 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
} else {
Unknown(instr); // Not used by V8.
}
} else if (instr->Opc1Field() == 0x3) {
if (instr->SzField() == 0x1) {
if (instr->Opc3Field() & 0x1) {
} else if (instr->Opc1Value() == 0x3) {
if (instr->SzValue() == 0x1) {
if (instr->Opc3Value() & 0x1) {
Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
@ -1093,14 +1095,14 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
if (instr->SzField() == 0x1) {
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
if (instr->SzField() == 0x1) {
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
@ -1109,13 +1111,13 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
Unknown(instr); // Not used by V8.
}
} else {
if ((instr->VCField() == 0x0) &&
(instr->VAField() == 0x0)) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VCField() == 0x0) &&
(instr->VAField() == 0x7) &&
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
if (instr->VLField() == 0) {
if (instr->VLValue() == 0) {
if (instr->Bits(15, 12) == 0xF) {
Format(instr, "vmsr'cond FPSCR, APSR");
} else {
@ -1133,11 +1135,12 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
}
void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
(instr->VAField() == 0x0));
void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0));
bool to_arm_register = (instr->VLField() == 0x1);
bool to_arm_register = (instr->VLValue() == 0x1);
if (to_arm_register) {
Format(instr, "vmov'cond 'rt, 'Sn");
@ -1147,19 +1150,19 @@ void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
}
void Decoder::DecodeVCMP(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
(instr->Opc3Field() & 0x1));
void Decoder::DecodeVCMP(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1));
// Comparison.
bool dp_operation = (instr->SzField() == 1);
bool dp_operation = (instr->SzValue() == 1);
bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
if (dp_operation && !raise_exception_for_qnan) {
if (instr->Opc2Field() == 0x4) {
if (instr->Opc2Value() == 0x4) {
Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
} else if (instr->Opc2Field() == 0x5) {
} else if (instr->Opc2Value() == 0x5) {
Format(instr, "vcmp.f64'cond 'Dd, #0.0");
} else {
Unknown(instr); // invalid
@ -1170,11 +1173,11 @@ void Decoder::DecodeVCMP(Instr* instr) {
}
void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
bool double_to_single = (instr->SzField() == 1);
bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
@ -1184,13 +1187,13 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
}
void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
(((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
bool to_integer = (instr->Bit(18) == 1);
bool dp_operation = (instr->SzField() == 1);
bool dp_operation = (instr->SzValue() == 1);
if (to_integer) {
bool unsigned_integer = (instr->Bit(16) == 0);
@ -1232,11 +1235,11 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
// <Rt, Rt2> = vmov(Dm)
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
ASSERT(instr->TypeValue() == 6);
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
case 0x8:
case 0xA:
if (instr->HasL()) {
@ -1257,8 +1260,8 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
Unknown(instr); // Not used by V8.
break;
}
} else if (instr->CoprocessorField() == 0xB) {
switch (instr->OpcodeField()) {
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
if (instr->Bits(7, 4) != 0x1) {
@ -1295,16 +1298,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == special_condition) {
if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
return Instr::kInstrSize;
return Instruction::kInstrSize;
}
switch (instr->TypeField()) {
switch (instr->TypeValue()) {
case 0:
case 1: {
DecodeType01(instr);
@ -1339,11 +1342,11 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
break;
}
}
return Instr::kInstrSize;
return Instruction::kInstrSize;
}
} } // namespace assembler::arm
} } // namespace v8::internal
@ -1351,8 +1354,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
namespace disasm {
namespace v8i = v8::internal;
const char* NameConverter::NameOfAddress(byte* addr) const {
static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
@ -1367,7 +1368,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
const char* NameConverter::NameOfCPURegister(int reg) const {
return assembler::arm::Registers::Name(reg);
return v8::internal::Registers::Name(reg);
}
@ -1401,7 +1402,7 @@ Disassembler::~Disassembler() {}
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte* instruction) {
assembler::arm::Decoder d(converter_, buffer);
v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}

11
deps/v8/src/arm/frames-arm.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -30,20 +30,13 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "frames-inl.h"
#include "arm/assembler-arm-inl.h"
namespace v8 {
namespace internal {
Address ExitFrame::ComputeStackPointer(Address fp) {
Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
Address sp = fp + ExitFrameConstants::kSPOffset;
if (marker == NULL) {
sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
}
return sp;
return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}

18
deps/v8/src/arm/frames-arm.h

@ -107,21 +107,17 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
static const int kCodeOffset = -1 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
// TODO(regis): Use a patched sp value on the stack instead.
// A marker of 0 indicates that double registers are saved.
static const int kMarkerOffset = -2 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
// The calling JS function is between FP and PC.
static const int kCallerPCOffset = +2 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
// The calling JS function is below FP.
static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +3 * kPointerSize;
static const int kCallerSPDisplacement = 2 * kPointerSize;
};
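// Sketched from the new constants, the exit frame now looks like:
//   fp + 2 * kPointerSize : caller SP (kCallerSPDisplacement)
//   fp + 1 * kPointerSize : caller PC
//   fp + 0 * kPointerSize : caller FP
//   fp - 1 * kPointerSize : saved SP slot (kSPOffset)
//   fp - 2 * kPointerSize : code object (kCodeOffset)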
@ -131,8 +127,8 @@ class StandardFrameConstants : public AllStatic {
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
static const int kCallerSPOffset = +2 * kPointerSize;
static const int kCallerPCOffset = 1 * kPointerSize;
static const int kCallerSPOffset = 2 * kPointerSize;
};

75
deps/v8/src/arm/full-codegen-arm.cc

@ -92,7 +92,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@ -517,16 +517,16 @@ void FullCodeGenerator::DoTest(Label* if_true,
}
void FullCodeGenerator::Split(Condition cc,
void FullCodeGenerator::Split(Condition cond,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (if_false == fall_through) {
__ b(cc, if_true);
__ b(cond, if_true);
} else if (if_true == fall_through) {
__ b(NegateCondition(cc), if_false);
__ b(NegateCondition(cond), if_false);
} else {
__ b(cc, if_true);
__ b(cond, if_true);
__ b(if_false);
}
}
@ -734,6 +734,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
clause->body_target()->entry_label()->Unuse();
// The default is not a test, but remember it as the final fall-through.
if (clause->is_default()) {
default_clause = clause;
@ -817,7 +819,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the object to a JS object.
Label convert, done_convert;
__ BranchOnSmi(r0, &convert);
__ JumpIfSmi(r0, &convert);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ b(hs, &done_convert);
__ bind(&convert);
@ -1548,8 +1550,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
if (op == Token::ADD || op == Token::SUB || op == Token::MUL) {
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {
GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
}
context()->Plug(r0);
}
@ -2130,7 +2137,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ JumpIfSmi(r0, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r0, ip);
__ b(eq, if_true);
@ -2162,7 +2169,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@ -2183,7 +2190,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ JumpIfSmi(r0, if_false);
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
@ -2229,7 +2236,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@ -2250,7 +2257,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@ -2271,7 +2278,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
__ BranchOnSmi(r0, if_false);
__ JumpIfSmi(r0, if_false);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@ -2378,7 +2385,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
__ BranchOnSmi(r0, &null);
__ JumpIfSmi(r0, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
@ -2529,7 +2536,7 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
Label done;
// If the object is a smi return the object.
__ BranchOnSmi(r0, &done);
__ JumpIfSmi(r0, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
__ b(ne, &done);
@ -2559,7 +2566,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
Label done;
// If the object is a smi, return the value.
__ BranchOnSmi(r1, &done);
__ JumpIfSmi(r1, &done);
// If the object is not a value type, return the value.
__ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
@ -2992,22 +2999,20 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else if (var->is_global()) {
__ ldr(r1, GlobalObjectOperand());
__ mov(r0, Operand(var->name()));
__ Push(r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else {
// Non-global variable. Call the runtime to look up the context
// where the variable was introduced.
// Non-global variable. Call the runtime to delete from the
// context where the variable was introduced.
__ push(context_register());
__ mov(r2, Operand(var->name()));
__ push(r2);
__ CallRuntime(Runtime::kLookupContext, 2);
__ push(r0);
__ mov(r2, Operand(var->name()));
__ push(r2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
}
break;
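
The rewritten delete path above collapses two steps (Runtime::kLookupContext followed by the DELETE builtin) into a single Runtime::kDeleteContextSlot call taking the context and the variable name. A toy sketch of that two-argument calling convention, with a plain vector standing in for the machine stack the __ push() calls use:

#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> stack;  // stand-in for the machine stack
void push(const std::string& v) { stack.push_back(v); }

// Stand-in for Runtime::kDeleteContextSlot: pops its two arguments, the
// variable name (pushed last) and the context, and performs the lookup
// and the deletion in one runtime call.
void CallRuntime_DeleteContextSlot() {
  std::string name = stack.back(); stack.pop_back();
  std::string context = stack.back(); stack.pop_back();
  std::printf("delete '%s' from %s\n", name.c_str(), context.c_str());
}

int main() {
  // Same argument order as the hunk: context first, then the name.
  push("current_context");
  push("x");
  CallRuntime_DeleteContextSlot();
}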
@ -3084,7 +3089,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool inline_smi_code = ShouldInlineSmiCase(expr->op());
if (inline_smi_code) {
Label call_stub;
__ BranchOnNotSmi(r0, &call_stub);
__ JumpIfNotSmi(r0, &call_stub);
__ mvn(r0, Operand(r0));
// Bit-clear inverted smi-tag.
__ bic(r0, r0, Operand(kSmiTagMask));
@ -3171,7 +3176,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call ToNumber only if operand is not a smi.
Label no_conversion;
__ BranchOnSmi(r0, &no_conversion);
__ JumpIfSmi(r0, &no_conversion);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
__ bind(&no_conversion);
@ -3205,7 +3210,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
__ BranchOnSmi(r0, &done);
__ JumpIfSmi(r0, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@ -3458,34 +3463,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = eq;
Condition cond = eq;
bool strict = false;
switch (op) {
case Token::EQ_STRICT:
strict = true;
// Fall through
case Token::EQ:
cc = eq;
cond = eq;
__ pop(r1);
break;
case Token::LT:
cc = lt;
cond = lt;
__ pop(r1);
break;
case Token::GT:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = lt;
cond = lt;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::LTE:
// Reverse left and right sides to obtain ECMA-262 conversion order.
cc = ge;
cond = ge;
__ mov(r1, result_register());
__ pop(r0);
break;
case Token::GTE:
cc = ge;
cond = ge;
__ pop(r1);
break;
case Token::IN:
@ -3498,19 +3503,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
__ BranchOnNotSmi(r2, &slow_case);
__ JumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cc, if_true, if_false, NULL);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(cc, strict, flags, r1, r0);
CompareStub stub(cond, strict, flags, r1, r0);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
Split(cc, if_true, if_false, fall_through);
Split(cond, if_true, if_false, fall_through);
}
}
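
Throughout the compare hunk above, GT and LTE reverse their operands and reuse the lt/ge conditions; as the comments note, this preserves the ECMA-262 left-to-right conversion order while letting the stub implement only one comparison direction. A small table-driven sketch of the mapping (condition names are illustrative):

#include <cstdio>

enum Condition { kEq, kLt, kGe };

struct Mapping { const char* token; Condition cond; bool swap_operands; };

// GT and LTE swap their operands and reuse lt/ge, so the stub always sees
// the operand to convert first on the left, as the comments above require:
//   a >  b  is compiled as  b <  a
//   a <= b  is compiled as  b >= a
constexpr Mapping kCompareMap[] = {
  {"==", kEq, false},
  {"<",  kLt, false},
  {">",  kLt, true},
  {"<=", kGe, true},
  {">=", kGe, false},
};

int main() {
  for (const Mapping& m : kCompareMap)
    std::printf("%-2s -> cond=%d swap=%d\n", m.token, m.cond, m.swap_operands);
}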

29 deps/v8/src/arm/ic-arm.cc

@ -95,13 +95,13 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
__ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
__ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
(1 << Map::kHasNamedInterceptor)));
__ b(nz, miss);
__ b(ne, miss);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(t1, ip);
__ b(nz, miss);
__ b(ne, miss);
}
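
The tst/b(ne, miss) pairs above check several map bit-field flags with a single AND against a combined mask. A standalone sketch of the same trick, with made-up bit positions standing in for V8's Map constants:

#include <cstdint>
#include <cstdio>

// Illustrative bit positions; the real constants live in V8's Map class.
constexpr int kIsAccessCheckNeeded = 1;
constexpr int kHasNamedInterceptor = 2;

// One AND plus one compare covers both "slow" cases at once, which is
// exactly what the single tst/b(ne, miss) pair in the generated code does.
bool NeedsSlowPath(uint8_t map_bit_field) {
  uint8_t mask = (1 << kIsAccessCheckNeeded) | (1 << kHasNamedInterceptor);
  return (map_bit_field & mask) != 0;
}

int main() {
  std::printf("%d\n", NeedsSlowPath(0));                          // 0: fast
  std::printf("%d\n", NeedsSlowPath(1 << kHasNamedInterceptor));  // 1: slow
}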
@ -379,7 +379,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
@ -388,7 +388,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
support_wrappers);
// Cache miss: Jump to runtime.
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@ -419,14 +420,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
int interceptor_bit,
Label* slow) {
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, slow);
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
__ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(scratch,
Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
__ b(nz, slow);
__ b(ne, slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
@ -749,7 +750,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
Label index_smi, index_string;
// Check that the key is a smi.
__ BranchOnNotSmi(r2, &check_string);
__ JumpIfNotSmi(r2, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@ -1165,7 +1166,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Register receiver = r1;
// Check that the key is a smi.
__ BranchOnNotSmi(key, &check_string);
__ JumpIfNotSmi(key, &check_string);
__ bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
@ -1346,7 +1347,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
Label slow;
// Check that the receiver isn't a smi.
__ BranchOnSmi(r1, &slow);
__ JumpIfSmi(r1, &slow);
// Check that the key is an array index, that is Uint32.
__ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
@ -1470,7 +1471,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ BranchOnNotSmi(value, &slow);
__ JumpIfNotSmi(value, &slow);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(r4, Operand(ip));
@ -1589,7 +1590,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
Register scratch = r3;
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, &miss);
__ JumpIfSmi(receiver, &miss);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@ -1603,7 +1604,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ b(ne, &miss);
// Check that value is a smi.
__ BranchOnNotSmi(value, &miss);
__ JumpIfNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ Push(receiver, value);
@ -1673,7 +1674,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
return no_condition;
return kNoCondition;
}
}
@ -1704,7 +1705,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
void PatchInlinedSmiCode(Address address) {
UNIMPLEMENTED();
// Currently there is no smi inlining in the ARM full code generator.
}

8 deps/v8/src/arm/jump-target-arm.cc

@ -76,7 +76,7 @@ void JumpTarget::DoJump() {
}
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
void JumpTarget::DoBranch(Condition cond, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
@ -86,7 +86,7 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// We have an expected frame to merge to on the backward edge.
cgen()->frame()->MergeTo(&entry_frame_, cc);
cgen()->frame()->MergeTo(&entry_frame_, cond);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
@ -98,8 +98,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
__ b(cc, &entry_label_);
if (cc == al) {
__ b(cond, &entry_label_);
if (cond == al) {
cgen()->DeleteFrame();
}
}

29 deps/v8/src/arm/lithium-arm.cc

@ -820,6 +820,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
return MarkAsCall(DefineFixed(result, r0), instr);
}
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;
@ -1018,11 +1019,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2);
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@ -1030,8 +1028,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
new LInstanceOfAndBranch(Use(instance_of->left()),
Use(instance_of->right()));
new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
UseFixed(instance_of->right(), r1));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
@ -1133,7 +1131,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor:
return AssignEnvironment(DefineAsRegister(result));
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
@ -1313,8 +1311,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@ -1404,7 +1402,7 @@ LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsObject(value, TempRegister()));
return DefineAsRegister(new LIsObject(value));
}
@ -1604,7 +1602,14 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
return new LStoreGlobal(UseRegisterAtStart(instr->value()));
if (instr->check_hole_value()) {
LOperand* temp = TempRegister();
LOperand* value = UseRegister(instr->value());
return AssignEnvironment(new LStoreGlobal(value, temp));
} else {
LOperand* value = UseRegisterAtStart(instr->value());
return new LStoreGlobal(value, NULL);
}
}

11 deps/v8/src/arm/lithium-arm.h

@ -734,9 +734,8 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
class LIsObject: public LTemplateInstruction<1, 1, 1> {
public:
LIsObject(LOperand* value, LOperand* temp) {
explicit LIsObject(LOperand* value) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
@ -745,10 +744,9 @@ class LIsObject: public LTemplateInstruction<1, 1, 1> {
class LIsObjectAndBranch: public LControlInstruction<1, 2> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@ -1256,10 +1254,11 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
};
class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
public:
explicit LStoreGlobal(LOperand* value) {
LStoreGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")

160 deps/v8/src/arm/lithium-codegen-arm.cc

@ -661,7 +661,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
if (cc == no_condition) {
if (cc == kNoCondition) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
@ -736,37 +736,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
deoptimization_index);
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
if (kind & Safepoint::kWithRegisters) {
// Register cp always contains a pointer to the context.
safepoint.DefinePointerRegister(cp);
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
int deoptimization_index) {
RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint =
safepoints_.DefineSafepointWithRegisters(
masm(), arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister()) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
// Register cp always contains a pointer to the context.
safepoint.DefinePointerRegister(cp);
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
deoptimization_index);
}
@ -774,20 +777,8 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
LPointerMap* pointers,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint =
safepoints_.DefineSafepointWithRegistersAndDoubles(
masm(), arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister()) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
// Register cp always contains a pointer to the context.
safepoint.DefinePointerRegister(cp);
RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
deoptimization_index);
}
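
The refactoring above folds the three safepoint recorders into one function parameterized by a Safepoint::Kind bitmask, reducing the register-saving variants to thin wrappers. A compact sketch of that shape, with toy types in place of the Lithium classes:

#include <cstdio>
#include <vector>

// Safepoint kinds as a bitmask, mirroring the consolidation in the hunk.
enum Kind : unsigned {
  kSimple = 0,
  kWithRegisters = 1 << 0,
  kWithDoubles = 1 << 1,
  kWithRegistersAndDoubles = kWithRegisters | kWithDoubles,
};

struct Pointer { bool is_stack_slot; int index; };

void RecordSafepoint(const std::vector<Pointer>& pointers, unsigned kind) {
  for (const Pointer& p : pointers) {
    if (p.is_stack_slot) {
      std::printf("stack slot %d\n", p.index);
    } else if (kind & kWithRegisters) {
      // Register pointers are only recorded for register-saving safepoints.
      std::printf("register %d\n", p.index);
    }
  }
  if (kind & kWithRegisters) std::printf("context register\n");
}

// The specialized recorders become one-line wrappers, as in the hunk.
void RecordSafepointWithRegisters(const std::vector<Pointer>& pointers) {
  RecordSafepoint(pointers, kWithRegisters);
}

int main() {
  RecordSafepoint({{true, 3}, {false, 1}}, kSimple);
  RecordSafepointWithRegisters({{true, 3}, {false, 1}});
}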
@ -1080,7 +1071,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize.
__ BranchOnNotSmi(result, &deoptimize);
__ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result);
__ b(al, &done);
@ -1160,7 +1151,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize.
__ BranchOnNotSmi(result, &deoptimize);
__ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result);
__ b(&done);
@ -1216,7 +1207,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(ne, &done);
if (instr->InputAt(1)->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
DeoptimizeIf(no_condition, instr->environment());
DeoptimizeIf(kNoCondition, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
@ -1483,7 +1474,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, nz);
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
@ -1541,7 +1532,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CallStub(&stub);
__ cmp(reg, Operand(0));
__ ldm(ia_w, sp, saved_regs);
EmitBranch(true_block, false_block, nz);
EmitBranch(true_block, false_block, ne);
}
}
}
@ -1593,7 +1584,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
Condition cond = kNoCondition;
switch (op) {
case Token::EQ:
case Token::EQ_STRICT:
@ -1730,18 +1721,62 @@ Condition LCodeGen::EmitIsObject(Register input,
Register temp2,
Label* is_not_object,
Label* is_object) {
Abort("EmitIsObject unimplemented.");
return ne;
__ JumpIfSmi(input, is_not_object);
__ LoadRoot(temp1, Heap::kNullValueRootIndex);
__ cmp(input, temp1);
__ b(eq, is_object);
// Load map.
__ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
__ tst(temp2, Operand(1 << Map::kIsUndetectable));
__ b(ne, is_not_object);
// Load instance type and check that it is in object type range.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
__ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, is_not_object);
__ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
return le;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Abort("DoIsObject unimplemented.");
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register temp = scratch0();
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
__ b(true_cond, &is_true);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Abort("DoIsObjectAndBranch unimplemented.");
Register reg = ToRegister(instr->InputAt(0));
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond =
EmitIsObject(reg, temp1, temp2, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
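
EmitIsObject above is the newly implemented predicate: smis and undetectable objects are not objects, null is, and everything else is decided by an instance-type range check. The same ladder as plain C++, with illustrative constants instead of V8's real instance-type values:

#include <cstdio>

// Illustrative stand-ins for the checks EmitIsObject performs; the real
// code reads these properties out of the value's map.
struct Value {
  bool is_smi;
  bool is_null;
  bool is_undetectable;
  int instance_type;
};

constexpr int FIRST_JS_OBJECT_TYPE = 100;  // placeholder numbers
constexpr int LAST_JS_OBJECT_TYPE = 120;

// Mirrors the generated ladder: smi -> false, null -> true, undetectable
// (e.g. document.all) -> false, otherwise instance type in object range.
bool IsObject(const Value& v) {
  if (v.is_smi) return false;
  if (v.is_null) return true;
  if (v.is_undetectable) return false;
  return v.instance_type >= FIRST_JS_OBJECT_TYPE &&
         v.instance_type <= LAST_JS_OBJECT_TYPE;
}

int main() {
  std::printf("%d\n", IsObject({false, true, false, 0}));     // null: 1
  std::printf("%d\n", IsObject({false, false, false, 110}));  // object: 1
  std::printf("%d\n", IsObject({true, false, false, 0}));     // smi: 0
}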
@ -1956,7 +1991,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
Abort("DoInstanceOfAndBranch unimplemented.");
ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ tst(r0, Operand(r0));
EmitBranch(true_block, false_block, eq);
}
@ -1989,7 +2033,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
ASSERT(result.is(r0));
// A Smi is not instance of anything.
__ BranchOnSmi(object, &false_result);
__ JumpIfSmi(object, &false_result);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
@ -2092,7 +2136,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
return ge;
default:
UNREACHABLE();
return no_condition;
return kNoCondition;
}
}
@ -2151,8 +2195,26 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0));
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
Register scratch = scratch0();
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->check_hole_value()) {
Register scratch2 = ToRegister(instr->TempAt(0));
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch2, ip);
DeoptimizeIf(eq, instr->environment());
}
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
}
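
The comment in DoStoreGlobal above explains the hole check: a global property cell holding the hole means the property was deleted, so the fast store has to deoptimize and let the runtime restore the property details. A toy model of that decision (kTheHole here is just a local sentinel, not V8's hole object):

#include <cstdio>

struct Cell { const void* value; };
const void* const kTheHole = &kTheHole;  // unique sentinel address

// Mirrors DoStoreGlobal: if the cell holds the hole, the global was
// deleted from the property dictionary, so the fast path bails out
// (deoptimizes) instead of silently resurrecting the property.
bool TryFastStoreGlobal(Cell* cell, const void* new_value, bool check_hole) {
  if (check_hole && cell->value == kTheHole) return false;  // deopt path
  cell->value = new_value;
  return true;
}

int main() {
  int v = 42;
  Cell live{&v}, deleted{kTheHole};
  std::printf("%d\n", TryFastStoreGlobal(&live, &v, true));     // 1
  std::printf("%d\n", TryFastStoreGlobal(&deleted, &v, true));  // 0
}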
@ -2565,7 +2627,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
new DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0));
// Smi check.
__ BranchOnNotSmi(input, deferred->entry());
__ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly.
EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
@ -3512,7 +3574,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
if (type_name->Equals(Heap::number_symbol())) {
__ tst(input, Operand(kSmiTagMask));
@ -3597,7 +3659,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
DeoptimizeIf(no_condition, instr->environment());
DeoptimizeIf(kNoCondition, instr->environment());
}

4 deps/v8/src/arm/lithium-codegen-arm.h

@ -223,6 +223,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,

144 deps/v8/src/arm/macro-assembler-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -318,7 +318,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
CheckConstPool(true, true);
add(pc, pc, Operand(index,
LSL,
assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
Instruction::kInstrSizeLog2 - kSmiTagSize));
BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
nop(); // Jump table alignment.
for (int i = 0; i < targets.length(); i++) {
@ -369,12 +369,12 @@ void MacroAssembler::RecordWriteHelper(Register object,
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Condition cond,
Label* branch) {
ASSERT(cc == eq || cc == ne);
ASSERT(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start()));
b(cc, branch);
b(cond, branch);
}
@ -615,37 +615,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles) {
// r0 is argc.
// Compute callee's stack pointer before making changes and save it as
// ip register so that it is restored as sp register on exit, thereby
// popping the args.
// ip = sp + kPointerSize * #args;
add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
// Compute the argv pointer and keep it in a callee-saved register.
sub(r6, ip, Operand(kPointerSize));
// Prepare the stack to be aligned when calling into C. After this point there
// are 5 pushes before the call into C, so the stack needs to be aligned after
// 5 pushes.
int frame_alignment = ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment != kPointerSize) {
// The following code needs to be more general if this assert does not hold.
ASSERT(frame_alignment == 2 * kPointerSize);
// With 5 pushes left the frame must be unaligned at this point.
mov(r7, Operand(Smi::FromInt(0)));
tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
push(r7, eq); // Push if aligned to make it unaligned.
}
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
// Compute the argv pointer in a callee-saved register.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
sub(r6, r6, Operand(kPointerSize));
// Setup the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
mov(fp, Operand(sp)); // Setup new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
if (FLAG_debug_code) {
mov(ip, Operand(0));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
mov(ip, Operand(CodeObject()));
push(ip); // Accessed from ExitFrame::code_slot.
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@ -659,25 +646,30 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
// Optionally save all double registers.
if (save_doubles) {
// TODO(regis): Use vstrm instruction.
// The stack alignment code above made sp unaligned, so add space for one
// more double register and use aligned addresses.
ASSERT(kDoubleSize == frame_alignment);
// Mark the frame as containing doubles by pushing a non-valid return
// address, i.e. 0.
ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
mov(ip, Operand(0)); // Marker and alignment word.
push(ip);
int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
sub(sp, sp, Operand(space));
sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
const int offset = -2 * kPointerSize;
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
vstr(reg, sp, i * kDoubleSize + kPointerSize);
vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
// Note that d0 will be accessible at
// fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
// Note that d0 will be accessible at fp - 2*kPointerSize -
// DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
// alignment word were pushed after the fp.
// Reserve place for the return address and align the frame preparing for
// calling the runtime function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand(kPointerSize));
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
}
// Set the exit frame sp value to point just before the return address
// location.
add(ip, sp, Operand(kPointerSize));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
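
The new EnterExitFrame above aligns sp by AND-ing it with the negated frame alignment, which works because the alignment is a power of two. A two-line demonstration of that rounding:

#include <cstdint>
#include <cstdio>

// sp & -alignment clears the low bits when alignment is a power of two;
// written here as sp & ~(alignment - 1), which is the same mask.
uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  return sp & ~(alignment - 1);
}

int main() {
  std::printf("0x%jx\n", (uintmax_t)AlignDown(0xBEF4, 8));  // 0xbef0
  std::printf("0x%jx\n", (uintmax_t)AlignDown(0xBEF0, 8));  // unchanged
}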
@ -715,12 +707,10 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all double registers.
if (save_doubles) {
// TODO(regis): Use vldrm instruction.
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
// Register d15 is just below the marker.
const int offset = ExitFrameConstants::kMarkerOffset;
vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
const int offset = -2 * kPointerSize;
vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
}
@ -736,9 +726,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
str(r3, MemOperand(ip));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, Operand(fp)); // respect ABI stack constraint
ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
// Tear down the exit frame, pop the arguments, and return. Callee-saved
// register r4 still holds argc.
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
mov(pc, lr);
}
@ -933,7 +926,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsNotStringMask));
b(nz, fail);
b(ne, fail);
}
@ -1392,7 +1385,7 @@ void MacroAssembler::CheckMap(Register obj,
Label* fail,
bool is_heap_object) {
if (!is_heap_object) {
BranchOnSmi(obj, fail);
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
mov(ip, Operand(map));
@ -1407,7 +1400,7 @@ void MacroAssembler::CheckMap(Register obj,
Label* fail,
bool is_heap_object) {
if (!is_heap_object) {
BranchOnSmi(obj, fail);
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(ip, index);
@ -1421,7 +1414,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
BranchOnSmi(function, miss);
JumpIfSmi(function, miss);
// Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@ -1520,7 +1513,7 @@ void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
Label done;
if ((flags & OBJECT_NOT_SMI) == 0) {
Label not_smi;
BranchOnNotSmi(object, &not_smi);
JumpIfNotSmi(object, &not_smi);
// Remove smi tag and convert to double.
mov(scratch1, Operand(object, ASR, kSmiTagSize));
vmov(scratch3, scratch1);
@ -1813,9 +1806,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void MacroAssembler::Assert(Condition cc, const char* msg) {
void MacroAssembler::Assert(Condition cond, const char* msg) {
if (FLAG_debug_code)
Check(cc, msg);
Check(cond, msg);
}
@ -1848,9 +1841,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
}
void MacroAssembler::Check(Condition cc, const char* msg) {
void MacroAssembler::Check(Condition cond, const char* msg) {
Label L;
b(cc, &L);
b(cond, &L);
Abort(msg);
// will not return here
bind(&L);
@ -1946,7 +1939,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), eq);
b(ne, on_not_both_smi);
@ -1956,7 +1949,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), ne);
b(eq, on_either_smi);
@ -1964,19 +1957,30 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
void MacroAssembler::AbortIfSmi(Register object) {
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi");
}
void MacroAssembler::AbortIfNotSmi(Register object) {
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(eq, "Operand is not smi");
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number) {
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
cmp(scratch, heap_number_map);
b(ne, on_not_heap_number);
}
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
@ -2003,7 +2007,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
Register scratch2,
Label* failure) {
// Check that neither is a smi.
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
tst(scratch1, Operand(kSmiTagMask));
b(eq, failure);

42 deps/v8/src/arm/macro-assembler-arm.h

@ -139,7 +139,7 @@ class MacroAssembler: public Assembler {
// scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object,
Register scratch,
Condition cc, // eq for new space, ne otherwise
Condition cond, // eq for new space, ne otherwise
Label* branch);
@ -545,16 +545,6 @@ class MacroAssembler: public Assembler {
}
inline void BranchOnSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
}
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
@ -695,14 +685,14 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
void Assert(Condition cond, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
void Check(Condition cond, const char* msg);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
@ -719,6 +709,9 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s);
}
void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
add(dst, src, Operand(src), s);
}
// Try to convert int32 to smi. If the value is too large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
@ -733,7 +726,20 @@ class MacroAssembler: public Assembler {
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
void SmiUntag(Register dst, Register src) {
mov(dst, Operand(src, ASR, kSmiTagSize));
}
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
// Jump if the register contains a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
}
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi.
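
The smi helpers gathered in this hunk all rely on the 32-bit smi encoding: a one-bit tag of 0 in the low bit. So SmiTag is add(dst, src, src) (a doubling, i.e. a shift left by one), SmiUntag is an arithmetic shift right, and JumpIfSmi/JumpIfNotSmi just test the low bit. A standalone sketch of that arithmetic:

#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;
constexpr int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // the low bit

// SmiTag is add(dst, src, src): doubling shifts the payload left one bit
// and leaves the tag bit clear (the smi tag is 0).
int32_t SmiTag(int32_t value) { return value + value; }

// SmiUntag is an arithmetic shift right, restoring the signed payload
// (arithmetic shift of negative values assumed, as on ARM).
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

// JumpIfSmi / JumpIfNotSmi test the low bit: clear means smi.
bool IsSmi(int32_t word) { return (word & kSmiTagMask) == 0; }

int main() {
  int32_t s = SmiTag(-21);
  std::printf("tagged=%d is_smi=%d untagged=%d\n", s, IsSmi(s), SmiUntag(s));
  std::printf("odd word is_smi=%d\n", IsSmi(0x1003));  // heap-object-like: 0
}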
@ -743,6 +749,14 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
// HeapNumber utilities
void JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number);
// ---------------------------------------------------------------------------
// String utilities

478 deps/v8/src/arm/simulator-arm.cc

File diff suppressed because it is too large

84 deps/v8/src/arm/simulator-arm.h

@ -80,8 +80,8 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h"
#include "hashmap.h"
namespace assembler {
namespace arm {
namespace v8 {
namespace internal {
class CachePage {
public:
@ -203,11 +203,11 @@ class Simulator {
};
// Unsupported instructions use Format to print an error and stop execution.
void Format(Instr* instr, const char* format);
void Format(Instruction* instr, const char* format);
// Checks if the current instruction should be executed based on its
// condition bits.
bool ConditionallyExecute(Instr* instr);
bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val);
@ -225,13 +225,13 @@ class Simulator {
void Copy_FPSCR_to_APSR();
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out);
void HandleRList(Instr* instr, bool load);
void SoftwareInterrupt(Instr* instr);
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
void HandleRList(Instruction* instr, bool load);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
inline bool isStopInstruction(Instr* instr);
inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code);
@ -245,41 +245,42 @@ class Simulator {
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
inline uint16_t ReadHU(int32_t addr, Instr* instr);
inline int16_t ReadH(int32_t addr, Instr* instr);
inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
inline void WriteH(int32_t addr, int16_t value, Instr* instr);
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instr* instr);
inline void WriteW(int32_t addr, int value, Instr* instr);
inline int ReadW(int32_t addr, Instruction* instr);
inline void WriteW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type.
void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
void DecodeType2(Instr* instr);
void DecodeType3(Instr* instr);
void DecodeType4(Instr* instr);
void DecodeType5(Instr* instr);
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
// Both type 0 and type 1 rolled into one.
void DecodeType01(Instruction* instr);
void DecodeType2(Instruction* instr);
void DecodeType3(Instruction* instr);
void DecodeType4(Instruction* instr);
void DecodeType5(Instruction* instr);
void DecodeType6(Instruction* instr);
void DecodeType7(Instruction* instr);
// Support for VFP.
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr);
void DecodeVCMP(Instr* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.
void InstructionDecode(Instr* instr);
void InstructionDecode(Instruction* instr);
// ICache.
static void CheckICache(Instr* instr);
static void CheckICache(Instruction* instr);
static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page);
@ -330,8 +331,8 @@ class Simulator {
static v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;
Instruction* break_pc_;
Instr break_instr_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
@ -344,27 +345,22 @@ class Simulator {
// instruction, if bit 31 of watched_stops[code].count is unset.
// The value watched_stops[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through.
struct StopCoundAndDesc {
struct StopCountAndDesc {
uint32_t count;
char* desc;
};
StopCoundAndDesc watched_stops[kNumOfWatchedStops];
StopCountAndDesc watched_stops[kNumOfWatchedStops];
};
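
The watched-stops comment above describes the encoding: bit 31 of the count word disables the stop, and the remaining 31 bits count how often it was hit. A small sketch of that bookkeeping (the kStopDisabledBit name is assumed here, not taken from the header):

#include <cstdint>
#include <cstdio>

constexpr uint32_t kStopDisabledBit = 1u << 31;

struct StopCountAndDesc { uint32_t count; const char* desc; };

// Bit 31 set means the stop is disabled; the low 31 bits are the counter.
bool IsEnabled(const StopCountAndDesc& s) {
  return (s.count & kStopDisabledBit) == 0;
}
void Disable(StopCountAndDesc& s) { s.count |= kStopDisabledBit; }
void Enable(StopCountAndDesc& s) { s.count &= ~kStopDisabledBit; }
uint32_t Hits(const StopCountAndDesc& s) { return s.count & ~kStopDisabledBit; }

int main() {
  StopCountAndDesc s{5, "after GC"};
  Disable(s);
  std::printf("enabled=%d hits=%u\n", IsEnabled(s), Hits(s));  // 0, 5
  Enable(s);
  std::printf("enabled=%d hits=%u\n", IsEnabled(s), Hits(s));  // 1, 5
}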
} } // namespace assembler::arm
namespace v8 {
namespace internal {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \
reinterpret_cast<Object*>(Simulator::current()->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \
Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@ -380,16 +376,16 @@ namespace internal {
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit();
return Simulator::current()->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
Simulator* sim = Simulator::current();
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
assembler::arm::Simulator::current()->PopAddress();
Simulator::current()->PopAddress();
}
};

74 deps/v8/src/arm/stub-cache-arm.cc

@ -370,17 +370,20 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
Label* miss,
bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch1 register.
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
support_wrappers ? &check_wrapper : miss);
// Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret();
if (support_wrappers) {
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, Operand(JS_VALUE_TYPE));
@ -392,6 +395,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
__ Ret();
}
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
@ -521,7 +525,7 @@ static void GenerateCallFunction(MacroAssembler* masm,
// -----------------------------------
// Check that the function really is a function.
__ BranchOnSmi(r1, miss);
__ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
@ -660,7 +664,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss);
__ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup);
@ -1194,7 +1198,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@ -1203,8 +1207,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
@ -1225,7 +1228,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(load_callback_property, 5, 1);
return true;
return Heap::undefined_value(); // Success.
}
@ -1243,7 +1246,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss);
__ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
@ -1511,7 +1514,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, &miss);
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), receiver,
@ -1565,7 +1568,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi.
__ BranchOnNotSmi(r4, &with_write_barrier);
__ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit);
__ Drop(argc + 1);
__ Ret();
@ -1672,7 +1675,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, &miss);
__ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object),
@ -2009,7 +2012,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ BranchOnSmi(r1, &miss);
__ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@ -2168,7 +2171,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ BranchOnNotSmi(r0, &not_smi);
__ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the
// argument.
@ -2646,9 +2649,18 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
__ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ cmp(r5, r6);
__ b(eq, &miss);
// Store the value in the cell.
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
__ Ret();
@ -2738,12 +2750,11 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
callback, name, &miss, &failure);
if (!success) {
MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
callback, name, &miss);
if (result->IsFailure()) {
miss.Unuse();
return failure;
return result;
}
__ bind(&miss);
@ -2890,12 +2901,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
callback, name, &miss, &failure);
if (!success) {
MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
r4, callback, name, &miss);
if (result->IsFailure()) {
miss.Unuse();
return failure;
return result;
}
__ bind(&miss);
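
These stub-cache hunks replace the old bool-plus-Failure** out-parameter protocol with a single MaybeObject* return value that either carries the result or reports failure via IsFailure(). A toy version of the pattern the callers now follow:

#include <cstdio>

// Toy model of the MaybeObject protocol: one return value either carries
// the result or is a failure marker, replacing bool + out-parameter.
struct MaybeObject {
  bool failed;
  int value;  // payload, meaningful only when !failed
  bool IsFailure() const { return failed; }
};

MaybeObject Success(int v) { return {false, v}; }
MaybeObject Failure() { return {true, 0}; }

MaybeObject GenerateLoadCallback(bool ok) {
  return ok ? Success(42) : Failure();
}

int Compile(bool ok) {
  MaybeObject result = GenerateLoadCallback(ok);
  if (result.IsFailure()) return -1;  // propagate, as the callers now do
  return result.value;
}

int main() {
  std::printf("%d %d\n", Compile(true), Compile(false));  // 42 -1
}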
@ -2995,7 +3005,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
@ -3361,10 +3371,10 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
Register receiver = r1;
// Check that the object isn't a smi
__ BranchOnSmi(receiver, &slow);
__ JumpIfSmi(receiver, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
__ JumpIfNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
@ -3645,7 +3655,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow);
__ JumpIfSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
@ -3658,7 +3668,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
__ b(ne, &slow);
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
__ JumpIfNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -3678,7 +3688,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number);
__ JumpIfNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

3 deps/v8/src/assembler.h

@ -185,7 +185,6 @@ class RelocInfo BASE_EMBEDDED {
DEBUG_BREAK, // Code target for the debugger statement.
CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,
GLOBAL_PROPERTY_CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
@ -203,7 +202,7 @@ class RelocInfo BASE_EMBEDDED {
NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
NONE, // never recorded
LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_OBJECT
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
};

37 deps/v8/src/ast.cc

@ -239,12 +239,19 @@ void ObjectLiteral::CalculateEmitStore() {
HashMap* table;
void* key;
uint32_t index;
Smi* smi_key_location;
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
ASSERT(!name->AsArrayIndex(&index));
if (name->AsArrayIndex(&index)) {
smi_key_location = Smi::FromInt(index);
key = &smi_key_location;
hash = index;
table = &elements;
} else {
key = name.location();
hash = name->Hash();
table = &properties;
}
} else if (handle->ToArrayIndex(&index)) {
key = handle.location();
hash = index;
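
The ast.cc change above stops asserting that a symbol key can never be an array index and instead routes such keys ("0", "1", ...) into the elements table, keyed by their smi value, so they deduplicate against numeric keys. A sketch of the canonical-array-index test driving the bucketing (simplified: overflow and the 2^32-1 bound are ignored):

#include <cstdint>
#include <cstdio>

// A string is a canonical array index if it is a decimal number without
// a leading zero (except "0" itself).
bool AsArrayIndex(const char* name, uint32_t* index) {
  if (*name == '\0' || (name[0] == '0' && name[1] != '\0')) return false;
  uint32_t v = 0;
  for (const char* p = name; *p; ++p) {
    if (*p < '0' || *p > '9') return false;
    v = v * 10 + static_cast<uint32_t>(*p - '0');
  }
  *index = v;
  return true;
}

int main() {
  uint32_t index;
  const char* keys[] = {"x", "12", "012"};
  for (const char* k : keys) {
    if (AsArrayIndex(k, &index))
      std::printf("\"%s\" -> elements table, hash=%u\n", k, index);
    else
      std::printf("\"%s\" -> properties table\n", k);
  }
}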
@ -514,6 +521,8 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
is_array_length_ = true;
} else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
is_string_length_ = true;
} else if (oracle->LoadIsBuiltin(this,
Builtins::LoadIC_FunctionPrototype)) {
is_function_prototype_ = true;
@ -570,7 +579,14 @@ static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
if (check_type_ == RECEIVER_MAP_CHECK) {
// For primitive checks the holder is set up to point to the
// corresponding prototype object, i.e. one step of the algorithm
// below has been already performed.
// For non-primitive checks we clear it to allow computing targets
// for polymorphic calls.
holder_ = Handle<JSObject>::null();
}
while (true) {
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
@ -640,27 +656,20 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
map = receiver_types_->at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
map = Handle<Map>(
oracle->GetPrototypeForPrimitiveCheck(check_type_)->map());
holder_ = Handle<JSObject>(
oracle->GetPrototypeForPrimitiveCheck(check_type_));
map = Handle<Map>(holder_->map());
}
is_monomorphic_ = ComputeTarget(map, name);
}
}
void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
is_smi_only_ = left.IsSmi() && right.IsSmi();
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT);
TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT);
if (left.IsSmi() && right.IsSmi()) {
TypeInfo info = oracle->CompareType(this);
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (left.IsNonPrimitive() && right.IsNonPrimitive()) {
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
ASSERT(compare_type_ == NONE);

20 deps/v8/src/ast.h

@ -1205,9 +1205,10 @@ class Property: public Expression {
key_(key),
pos_(pos),
type_(type),
is_monomorphic_(false),
receiver_types_(NULL),
is_monomorphic_(false),
is_array_length_(false),
is_string_length_(false),
is_function_prototype_(false),
is_arguments_access_(false) { }
@ -1221,6 +1222,7 @@ class Property: public Expression {
int position() const { return pos_; }
bool is_synthetic() const { return type_ == SYNTHETIC; }
bool IsStringLength() const { return is_string_length_; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Marks that this is actually an argument rewritten to a keyed property
@ -1249,11 +1251,12 @@ class Property: public Expression {
int pos_;
Type type_;
bool is_monomorphic_;
ZoneMapList* receiver_types_;
bool is_array_length_;
bool is_function_prototype_;
bool is_arguments_access_;
bool is_monomorphic_ : 1;
bool is_array_length_ : 1;
bool is_string_length_ : 1;
bool is_function_prototype_ : 1;
bool is_arguments_access_ : 1;
Handle<Map> monomorphic_receiver_type_;
// Dummy property used during preparsing.
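
The Property change above turns five bool members into one-bit bitfields alongside the new is_string_length_ flag, so the flags pack into a single byte. A sizeof comparison showing the effect (exact sizes are compiler-dependent):

#include <cstdio>

struct Flat {
  bool a, b, c, d, e;  // one byte each, as in the old layout
};

struct Packed {
  bool a : 1;  // one bit each, as in the new layout
  bool b : 1;
  bool c : 1;
  bool d : 1;
  bool e : 1;
};

int main() {
  std::printf("flat=%zu packed=%zu\n", sizeof(Flat), sizeof(Packed));
  // Typically prints 5 vs 1: the packed flags share a single byte.
}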
@ -1395,7 +1398,7 @@ class BinaryOperation: public Expression {
Expression* left,
Expression* right,
int pos)
: op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
: op_(op), left_(left), right_(right), pos_(pos) {
ASSERT(Token::IsBinaryOp(op));
right_id_ = (op == Token::AND || op == Token::OR)
? static_cast<int>(GetNextId())
@ -1416,10 +1419,6 @@ class BinaryOperation: public Expression {
Expression* right() const { return right_; }
int position() const { return pos_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiOnly() const { return is_smi_only_; }
// Bailout support.
int RightId() const { return right_id_; }
@ -1428,7 +1427,6 @@ class BinaryOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
bool is_smi_only_;
// The short-circuit logical operations have an AST ID for their
// right-hand subexpression.
int right_id_;

3 deps/v8/src/bootstrapper.cc

@ -1805,9 +1805,8 @@ Genesis::Genesis(Handle<Object> global_object,
AddToWeakGlobalContextList(*global_context_);
Top::set_context(*global_context_);
i::Counters::contexts_created_by_snapshot.Increment();
result_ = global_context_;
JSFunction* empty_function =
JSFunction::cast(result_->function_map()->prototype());
JSFunction::cast(global_context_->function_map()->prototype());
empty_function_ = Handle<JSFunction>(empty_function);
Handle<GlobalObject> inner_global;
Handle<JSGlobalProxy> global_proxy =

7
deps/v8/src/builtins.cc

@ -1228,7 +1228,12 @@ static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
LoadIC::GenerateStringLength(masm);
LoadIC::GenerateStringLength(masm, false);
}
static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
LoadIC::GenerateStringLength(masm, true);
}

1
deps/v8/src/builtins.h

@ -86,6 +86,7 @@ enum BuiltinExtraArguments {
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \
\

16
deps/v8/src/code-stubs.h

@ -273,21 +273,20 @@ class FastNewClosureStub : public CodeStub {
class FastNewContextStub : public CodeStub {
public:
// We want no more than 64 different stubs.
static const int kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63;
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ >= Context::MIN_CONTEXT_SLOTS && slots_ <= kMaximumSlots);
ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
virtual const char* GetName() { return "FastNewContextStub"; }
virtual Major MajorKey() { return FastNewContext; }
virtual int MinorKey() { return slots_; }
int slots_;
const char* GetName() { return "FastNewContextStub"; }
Major MajorKey() { return FastNewContext; }
int MinorKey() { return slots_; }
};
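With this change slots_ counts only the function's own context slots; the fixed header slots are added back at allocation time, as the ia32 FastNewContextStub::Generate hunk further down shows. A hedged sketch of the new accounting (the MIN_CONTEXT_SLOTS value below is assumed for illustration):

#include <cassert>

constexpr int kMinContextSlots = 5;  // assumed stand-in for Context::MIN_CONTEXT_SLOTS
constexpr int kMaximumSlots = 64;    // the new stub limit

// FixedArray length allocated for a context with `slots` own slots,
// mirroring `int length = slots_ + Context::MIN_CONTEXT_SLOTS;` below.
int ContextLength(int slots) {
  assert(slots > 0 && slots <= kMaximumSlots);
  return slots + kMinContextSlots;
}

int main() { return ContextLength(3) == 3 + kMinContextSlots ? 0 : 1; }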
@ -600,8 +599,7 @@ class CEntryStub : public CodeStub {
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope,
int alignment_skew = 0);
bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);

19
deps/v8/src/deoptimizer.h

@ -128,14 +128,17 @@ class Deoptimizer : public Malloced {
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
// Given the relocation info of a call to the stack check stub, patch the
// code so as to go unconditionally to the on-stack replacement builtin
// instead.
static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
// Given the relocation info of a call to the on-stack replacement
// builtin, patch the code back to the original stack check code.
static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
// Patch all stack guard checks in the unoptimized code to
// unconditionally call replacement_code.
static void PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code);
// Change all patched stack guard checks in the unoptimized code
// back to a normal stack guard check.
static void RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code);
~Deoptimizer();

6
deps/v8/src/frames.cc

@ -695,7 +695,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
ASSERT(frames->length() == 0);
ASSERT(is_optimized());
int deopt_index = AstNode::kNoNumber;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
// BUG(3243555): Since we don't have a lazy-deopt registered at
@ -793,7 +793,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
*deopt_index = safepoint_entry.deoptimization_index();
ASSERT(*deopt_index != AstNode::kNoNumber);
ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
return DeoptimizationInputData::cast(code->deoptimization_data());
}
@ -803,7 +803,7 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
ASSERT(functions->length() == 0);
ASSERT(is_optimized());
int deopt_index = AstNode::kNoNumber;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),

6
deps/v8/src/heap.cc

@ -35,6 +35,7 @@
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@ -400,6 +401,8 @@ void Heap::GarbageCollectionPrologue() {
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsBeforeGC();
#endif
LiveObjectList::GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@ -412,6 +415,7 @@ intptr_t Heap::SizeOfObjects() {
}
void Heap::GarbageCollectionEpilogue() {
LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
ZapFromSpace();
@ -1066,6 +1070,8 @@ void Heap::Scavenge() {
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
LiveObjectList::UpdateReferencesForScavengeGC();
ASSERT(new_space_front == new_space_.top());
// Set age mark.

31
deps/v8/src/hydrogen-instructions.cc

@ -490,7 +490,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
#ifdef DEBUG
void HInstruction::Verify() const {
void HInstruction::Verify() {
// Verify that input operands are defined before use.
HBasicBlock* cur_block = block();
for (int i = 0; i < OperandCount(); ++i) {
@ -517,6 +517,11 @@ void HInstruction::Verify() const {
if (HasSideEffects() && !IsOsrEntry()) {
ASSERT(next()->IsSimulate());
}
// Verify that instructions that can be eliminated by GVN have overridden
// HValue::DataEquals. The default implementation is UNREACHABLE. We
// don't actually care whether DataEquals returns true or false here.
if (CheckFlag(kUseGVN)) DataEquals(this);
}
#endif
@ -524,7 +529,7 @@ void HInstruction::Verify() const {
HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
for (int i = 0; i < count; ++i) arguments_[i] = NULL;
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
@ -1119,10 +1124,10 @@ void HCompare::PrintDataTo(StringStream* stream) const {
void HCompare::SetInputRepresentation(Representation r) {
input_representation_ = r;
if (r.IsTagged()) {
SetFlagMask(AllSideEffects());
SetAllSideEffects();
ClearFlag(kUseGVN);
} else {
ClearFlagMask(AllSideEffects());
ClearAllSideEffects();
SetFlag(kUseGVN);
}
}
@ -1388,7 +1393,7 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
void HPhi::Verify() const {
void HPhi::Verify() {
ASSERT(OperandCount() == block()->predecessors()->length());
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
@ -1400,49 +1405,49 @@ void HPhi::Verify() const {
}
void HSimulate::Verify() const {
void HSimulate::Verify() {
HInstruction::Verify();
ASSERT(HasAstId());
}
void HBoundsCheck::Verify() const {
void HBoundsCheck::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckSmi::Verify() const {
void HCheckSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckNonSmi::Verify() const {
void HCheckNonSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckInstanceType::Verify() const {
void HCheckInstanceType::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckMap::Verify() const {
void HCheckMap::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckFunction::Verify() const {
void HCheckFunction::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckPrototypeMaps::Verify() const {
void HCheckPrototypeMaps::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}

310
deps/v8/src/hydrogen-instructions.h

@ -46,112 +46,6 @@ class LInstruction;
class LChunkBuilder;
// Type hierarchy:
//
// HValue
// HInstruction
// HAccessArgumentsAt
// HApplyArguments
// HArgumentsElements
// HArgumentsLength
// HArgumentsObject
// HBinaryOperation
// HArithmeticBinaryOperation
// HAdd
// HDiv
// HMod
// HMul
// HSub
// HBitwiseBinaryOperation
// HBitAnd
// HBitOr
// HBitXor
// HSar
// HShl
// HShr
// HBoundsCheck
// HCompare
// HCompareJSObjectEq
// HInstanceOf
// HInstanceOfKnownGlobal
// HLoadKeyed
// HLoadKeyedFastElement
// HLoadKeyedGeneric
// HPower
// HStoreNamed
// HStoreNamedField
// HStoreNamedGeneric
// HStringCharCodeAt
// HBlockEntry
// HCall
// HCallConstantFunction
// HCallFunction
// HCallGlobal
// HCallKeyed
// HCallKnownGlobal
// HCallNamed
// HCallNew
// HCallRuntime
// HCallStub
// HCheckPrototypeMaps
// HConstant
// HControlInstruction
// HDeoptimize
// HGoto
// HUnaryControlInstruction
// HCompareMap
// HReturn
// HTest
// HThrow
// HEnterInlined
// HFunctionLiteral
// HGlobalObject
// HGlobalReceiver
// HLeaveInlined
// HLoadContextSlot
// HLoadGlobal
// HMaterializedLiteral
// HArrayLiteral
// HObjectLiteral
// HRegExpLiteral
// HOsrEntry
// HParameter
// HSimulate
// HStackCheck
// HStoreKeyed
// HStoreKeyedFastElement
// HStoreKeyedGeneric
// HUnaryOperation
// HBitNot
// HChange
// HCheckFunction
// HCheckInstanceType
// HCheckMap
// HCheckNonSmi
// HCheckSmi
// HDeleteProperty
// HFixedArrayLength
// HJSArrayLength
// HLoadElements
// HTypeofIs
// HLoadNamedField
// HLoadNamedGeneric
// HLoadFunctionPrototype
// HPushArgument
// HStringLength
// HTypeof
// HUnaryMathOperation
// HUnaryPredicate
// HClassOfTest
// HHasCachedArrayIndex
// HHasInstanceType
// HIsNull
// HIsObject
// HIsSmi
// HValueOf
// HUnknownOSRValue
// HPhi
#define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
V(ArithmeticBinaryOperation) \
V(BinaryOperation) \
@ -224,12 +118,12 @@ class LChunkBuilder;
V(LeaveInlined) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadFunctionPrototype) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@ -268,7 +162,6 @@ class LChunkBuilder;
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
V(FunctionPrototypes) \
V(OsrEntries)
#define DECLARE_INSTRUCTION(type) \
@ -573,11 +466,6 @@ class HValue: public ZoneObject {
return flags << kChangesToDependsFlagsLeftShift;
}
// A flag mask to mark an instruction as having arbitrary side effects.
static int AllSideEffects() {
return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
}
static HValue* cast(HValue* value) { return value; }
enum Opcode {
@ -636,9 +524,6 @@ class HValue: public ZoneObject {
return NULL;
}
bool HasSideEffects() const {
return (flags_ & AllSideEffects()) != 0;
}
bool IsDefinedAfter(HBasicBlock* other) const;
// Operands.
@ -661,12 +546,13 @@ class HValue: public ZoneObject {
void Delete();
int flags() const { return flags_; }
void SetFlagMask(int mask) { flags_ |= mask; }
void SetFlag(Flag f) { SetFlagMask(1 << f); }
void ClearFlagMask(int mask) { flags_ &= ~mask; }
void ClearFlag(Flag f) { ClearFlagMask(1 << f); }
bool CheckFlag(Flag f) const { return CheckFlagMask(1 << f); }
bool CheckFlagMask(int mask) const { return (flags_ & mask) != 0; }
void SetFlag(Flag f) { flags_ |= (1 << f); }
void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
void SetAllSideEffects() { flags_ |= AllSideEffects(); }
void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
Range* range() const { return range_; }
bool HasRange() const { return range_ != NULL; }
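The generic SetFlagMask/ClearFlagMask pair disappears in favor of named helpers, so the side-effect bits can only be touched as a group. A compact stand-in for the same bit arithmetic (flag names invented):

#include <cassert>
#include <cstdint>

enum Flag { kUseGVN, kChangesGlobalVars, kChangesMaps };

struct Flags {
  uint32_t bits = 0;
  static uint32_t AllSideEffects() {
    return (1u << kChangesGlobalVars) | (1u << kChangesMaps);
  }
  void SetFlag(Flag f)         { bits |= (1u << f); }
  void ClearFlag(Flag f)       { bits &= ~(1u << f); }
  bool CheckFlag(Flag f) const { return (bits & (1u << f)) != 0; }
  void SetAllSideEffects()     { bits |= AllSideEffects(); }
  void ClearAllSideEffects()   { bits &= ~AllSideEffects(); }
  bool HasSideEffects() const  { return (bits & AllSideEffects()) != 0; }
};

int main() {
  Flags f;
  f.SetAllSideEffects();
  assert(f.HasSideEffects() && !f.CheckFlag(kUseGVN));
  f.ClearAllSideEffects();
  assert(!f.HasSideEffects());
  return 0;
}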
@ -714,11 +600,16 @@ class HValue: public ZoneObject {
void InsertInputConversion(HInstruction* previous, int index, HType type);
#ifdef DEBUG
virtual void Verify() const = 0;
virtual void Verify() = 0;
#endif
protected:
virtual bool DataEquals(HValue* other) const { return true; }
// This function must be overridden for instructions with flag kUseGVN, to
// compare the non-Operand parts of the instruction.
virtual bool DataEquals(HValue* other) const {
UNREACHABLE();
return false;
}
virtual void RepresentationChanged(Representation to) { }
virtual Range* InferRange();
virtual void DeleteFromGraph() = 0;
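Making the base DataEquals unreachable pairs with the Verify hunk earlier: in debug builds, any instruction flagged for GVN that forgot the override now aborts. A simplified model of that contract, outside V8:

#include <cstdlib>

struct Value {
  bool use_gvn = false;
  virtual ~Value() {}
  // Mirrors the new UNREACHABLE() default: overriding is mandatory for
  // anything that sets the GVN flag.
  virtual bool DataEquals(const Value*) const { std::abort(); }
  void Verify() const {
    if (use_gvn) DataEquals(this);  // the debug check from the .cc hunk
  }
};

struct Add : Value {
  Add() { use_gvn = true; }
  bool DataEquals(const Value*) const override { return true; }  // no payload
};

int main() {
  Add a;
  a.Verify();  // fine; a GVN value lacking the override would abort here
  return 0;
}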
@ -735,6 +626,11 @@ class HValue: public ZoneObject {
}
private:
// A flag mask to mark an instruction as having arbitrary side effects.
static int AllSideEffects() {
return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
}
void InternalReplaceAtUse(HValue* use, HValue* other);
void RegisterUse(int index, HValue* new_value);
@ -774,7 +670,7 @@ class HInstruction: public HValue {
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
// Returns whether this is some kind of deoptimizing check
@ -1063,7 +959,7 @@ class HSimulate: public HInstruction {
DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
protected:
@ -1159,6 +1055,9 @@ class HGlobalObject: public HInstruction {
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1171,6 +1070,9 @@ class HGlobalReceiver: public HInstruction {
}
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1361,6 +1263,9 @@ class HJSArrayLength: public HUnaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1377,6 +1282,9 @@ class HFixedArrayLength: public HUnaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1394,6 +1302,9 @@ class HBitNot: public HUnaryOperation {
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1489,6 +1400,9 @@ class HLoadElements: public HUnaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1510,7 +1424,7 @@ class HCheckMap: public HUnaryOperation {
virtual HType CalculateInferredType() const;
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
Handle<Map> map() const { return map_; }
@ -1545,7 +1459,7 @@ class HCheckFunction: public HUnaryOperation {
virtual HType CalculateInferredType() const;
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
Handle<JSFunction> target() const { return target_; }
@ -1587,7 +1501,7 @@ class HCheckInstanceType: public HUnaryOperation {
}
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
@ -1628,10 +1542,13 @@ class HCheckNonSmi: public HUnaryOperation {
virtual HType CalculateInferredType() const;
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1646,7 +1563,7 @@ class HCheckPrototypeMaps: public HInstruction {
virtual bool IsCheckInstruction() const { return true; }
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
Handle<JSObject> prototype() const { return prototype_; }
@ -1689,10 +1606,13 @@ class HCheckSmi: public HUnaryOperation {
virtual HType CalculateInferredType() const;
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1745,7 +1665,7 @@ class HPhi: public HValue {
virtual void PrintTo(StringStream* stream) const;
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
DECLARE_INSTRUCTION(Phi)
@ -1833,7 +1753,7 @@ class HConstant: public HInstruction {
}
#ifdef DEBUG
virtual void Verify() const { }
virtual void Verify() { }
#endif
DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
@ -1952,6 +1872,9 @@ class HArgumentsElements: public HInstruction {
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1963,6 +1886,9 @@ class HArgumentsLength: public HUnaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -1999,6 +1925,8 @@ class HAccessArgumentsAt: public HInstruction {
operands_[index] = value;
}
virtual bool DataEquals(HValue* other) const { return true; }
private:
HOperandVector<3> operands_;
};
@ -2018,13 +1946,16 @@ class HBoundsCheck: public HBinaryOperation {
}
#ifdef DEBUG
virtual void Verify() const;
virtual void Verify();
#endif
HValue* index() const { return left(); }
HValue* length() const { return right(); }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2034,7 +1965,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
: HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
virtual Representation RequiredInputRepresentation(int index) const {
@ -2044,7 +1975,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
virtual void RepresentationChanged(Representation to) {
if (!to.IsTagged()) {
ASSERT(to.IsInteger32());
ClearFlagMask(AllSideEffects());
ClearAllSideEffects();
SetFlag(kTruncatingToInt32);
SetFlag(kUseGVN);
}
@ -2062,12 +1993,12 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
: HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
virtual void RepresentationChanged(Representation to) {
if (!to.IsTagged()) {
ClearFlagMask(AllSideEffects());
ClearAllSideEffects();
SetFlag(kUseGVN);
}
}
@ -2093,7 +2024,7 @@ class HCompare: public HBinaryOperation {
: HBinaryOperation(left, right), token_(token) {
ASSERT(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
void SetInputRepresentation(Representation r);
@ -2142,6 +2073,9 @@ class HCompareJSObjectEq: public HBinaryOperation {
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2184,6 +2118,9 @@ class HIsObject: public HUnaryPredicate {
explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2192,6 +2129,9 @@ class HIsSmi: public HUnaryPredicate {
explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2228,6 +2168,9 @@ class HHasCachedArrayIndex: public HUnaryPredicate {
explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2278,7 +2221,7 @@ class HInstanceOf: public HBinaryOperation {
public:
HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
@ -2296,7 +2239,7 @@ class HInstanceOfKnownGlobal: public HUnaryOperation {
HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
: HUnaryOperation(left), function_(right) {
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
Handle<JSFunction> function() { return function_; }
@ -2326,6 +2269,9 @@ class HPower: public HBinaryOperation {
}
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2348,6 +2294,8 @@ class HAdd: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Add, "add")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2363,6 +2311,8 @@ class HSub: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2383,6 +2333,8 @@ class HMul: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2398,6 +2350,8 @@ class HMod: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2414,6 +2368,8 @@ class HDiv: public HArithmeticBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(Div, "div")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2429,6 +2385,8 @@ class HBitAnd: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2442,6 +2400,9 @@ class HBitXor: public HBitwiseBinaryOperation {
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2456,6 +2417,8 @@ class HBitOr: public HBitwiseBinaryOperation {
DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange();
};
@ -2469,6 +2432,9 @@ class HShl: public HBitwiseBinaryOperation {
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2480,6 +2446,9 @@ class HShr: public HBitwiseBinaryOperation {
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2492,6 +2461,9 @@ class HSar: public HBitwiseBinaryOperation {
virtual HType CalculateInferredType() const;
DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
@ -2534,7 +2506,7 @@ class HCallStub: public HInstruction {
argument_count_(argument_count),
transcendental_type_(TranscendentalCache::kNumberOfCaches) {
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
CodeStub::Major major_key() { return major_key_; }
@ -2603,12 +2575,17 @@ class HLoadGlobal: public HInstruction {
class HStoreGlobal: public HUnaryOperation {
public:
HStoreGlobal(HValue* value, Handle<JSGlobalPropertyCell> cell)
: HUnaryOperation(value), cell_(cell) {
HStoreGlobal(HValue* value,
Handle<JSGlobalPropertyCell> cell,
bool check_hole_value)
: HUnaryOperation(value),
cell_(cell),
check_hole_value_(check_hole_value) {
SetFlag(kChangesGlobalVars);
}
Handle<JSGlobalPropertyCell> cell() const { return cell_; }
bool check_hole_value() const { return check_hole_value_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
@ -2617,14 +2594,9 @@ class HStoreGlobal: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
protected:
virtual bool DataEquals(HValue* other) const {
HStoreGlobal* b = HStoreGlobal::cast(other);
return cell_.is_identical_to(b->cell());
}
private:
Handle<JSGlobalPropertyCell> cell_;
bool check_hole_value_;
};
@ -2704,7 +2676,7 @@ class HLoadNamedGeneric: public HUnaryOperation {
HLoadNamedGeneric(HValue* object, Handle<Object> name)
: HUnaryOperation(object), name_(name) {
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
HValue* object() const { return OperandAt(0); }
@ -2716,12 +2688,6 @@ class HLoadNamedGeneric: public HUnaryOperation {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
protected:
virtual bool DataEquals(HValue* other) const {
HLoadNamedGeneric* b = HLoadNamedGeneric::cast(other);
return name_.is_identical_to(b->name_);
}
private:
Handle<Object> name_;
};
@ -2732,7 +2698,8 @@ class HLoadFunctionPrototype: public HUnaryOperation {
explicit HLoadFunctionPrototype(HValue* function)
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlagMask(kDependsOnFunctionPrototypes);
SetFlag(kUseGVN);
SetFlag(kDependsOnCalls);
}
HValue* function() const { return OperandAt(0); }
@ -2781,13 +2748,16 @@ class HLoadKeyedFastElement: public HLoadKeyed {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
"load_keyed_fast_element")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HLoadKeyedGeneric: public HLoadKeyed {
public:
HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
@ -2823,12 +2793,6 @@ class HStoreNamed: public HBinaryOperation {
DECLARE_INSTRUCTION(StoreNamed)
protected:
virtual bool DataEquals(HValue* other) const {
HStoreNamed* b = HStoreNamed::cast(other);
return name_.is_identical_to(b->name_);
}
private:
Handle<Object> name_;
};
@ -2874,7 +2838,7 @@ class HStoreNamedGeneric: public HStoreNamed {
public:
HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
: HStoreNamed(obj, name, val) {
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
@ -2939,7 +2903,7 @@ class HStoreKeyedGeneric: public HStoreKeyed {
public:
HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
: HStoreKeyed(obj, key, val) {
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
@ -2960,14 +2924,14 @@ class HStringCharCodeAt: public HBinaryOperation {
: Representation::Tagged();
}
virtual bool DataEquals(HValue* other) const { return true; }
HValue* string() const { return OperandAt(0); }
HValue* index() const { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange() {
return new Range(0, String::kMaxUC16CharCode);
}
@ -2990,11 +2954,11 @@ class HStringLength: public HUnaryOperation {
return HType::Smi();
}
virtual bool DataEquals(HValue* other) const { return true; }
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
virtual Range* InferRange() {
return new Range(0, String::kMaxLength);
}
@ -3128,7 +3092,7 @@ class HDeleteProperty: public HBinaryOperation {
HDeleteProperty(HValue* obj, HValue* key)
: HBinaryOperation(obj, key) {
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
SetAllSideEffects();
}
virtual Representation RequiredInputRepresentation(int index) const {

46
deps/v8/src/hydrogen.cc

@ -684,7 +684,7 @@ HGraph::HGraph(CompilationInfo* info)
}
bool HGraph::AllowAggressiveOptimizations() const {
bool HGraph::AllowCodeMotion() const {
return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
}
@ -1446,19 +1446,23 @@ void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
}
}
// Only move instructions that postdominate the loop header (i.e. are
// always executed inside the loop). This is to avoid unnecessary
// deoptimizations assuming the loop is executed at least once.
// TODO(fschneider): Better type feedback should give us information
// about code that was never executed.
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
if (FLAG_aggressive_loop_invariant_motion &&
!instr->IsChange() &&
(!instr->IsCheckInstruction() ||
graph_->AllowAggressiveOptimizations())) {
// If we've disabled code motion, don't move any instructions.
if (!graph_->AllowCodeMotion()) return false;
// If --aggressive-loop-invariant-motion, move everything except change
// instructions.
if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
return true;
}
// Otherwise only move instructions that postdominate the loop header
// (i.e. are always executed inside the loop). This is to avoid
// unnecessary deoptimizations assuming the loop is executed at least
// once. TODO(fschneider): Better type feedback should give us
// information about code that was never executed.
HBasicBlock* block = instr->block();
bool result = true;
if (block != loop_header) {
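The rewritten ShouldMove evaluates its gates in a fixed order: the global code-motion switch first, then the aggressive flag, then the postdominance rule. The same decision order as a freestanding function (parameter names are mine):

bool ShouldMoveSketch(bool code_motion_allowed,
                      bool aggressive_flag,
                      bool is_change,
                      bool postdominates_loop_header) {
  if (!code_motion_allowed) return false;          // too many deopts already
  if (aggressive_flag && !is_change) return true;  // move all but HChange
  return postdominates_loop_header;                // conservative default
}

int main() {
  // Even aggressive motion never hoists change instructions.
  return ShouldMoveSketch(true, true, true, false) ? 1 : 0;
}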
@ -3366,9 +3370,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
LookupGlobalPropertyCell(var, &lookup, true);
CHECK_BAILOUT;
bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
Handle<GlobalObject> global(graph()->info()->global_object());
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
HInstruction* instr = new HStoreGlobal(value, cell);
HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
instr->set_position(position);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(ast_id);
@ -3385,7 +3390,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
// We have a second position recorded in the FullCodeGenerator to have
// type feedback for the binary operation.
BinaryOperation* operation = expr->binary_operation();
operation->RecordTypeFeedback(oracle());
if (var != NULL) {
if (!var->is_global() && !var->IsStackAllocated()) {
@ -3766,6 +3770,14 @@ void HGraphBuilder::VisitProperty(Property* expr) {
AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
instr = new HJSArrayLength(array);
} else if (expr->IsStringLength()) {
HValue* string = Pop();
AddInstruction(new HCheckNonSmi(string));
AddInstruction(new HCheckInstanceType(string,
FIRST_STRING_TYPE,
LAST_STRING_TYPE));
instr = new HStringLength(string);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
AddInstruction(new HCheckNonSmi(function));
@ -3952,8 +3964,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
int count_before = AstNode::Count();
// Parse and allocate variables.
Handle<SharedFunctionInfo> shared(target->shared());
CompilationInfo inner_info(shared);
CompilationInfo inner_info(target);
if (!ParserApi::Parse(&inner_info) ||
!Scope::Analyze(&inner_info)) {
return false;
@ -3976,9 +3987,10 @@ bool HGraphBuilder::TryInline(Call* expr) {
// Don't inline functions that uses the arguments object or that
// have a mismatching number of parameters.
Handle<SharedFunctionInfo> shared(target->shared());
int arity = expr->arguments()->length();
if (function->scope()->arguments() != NULL ||
arity != target->shared()->formal_parameter_count()) {
arity != shared->formal_parameter_count()) {
return false;
}
@ -4801,7 +4813,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
default:
UNREACHABLE();
}
TypeInfo info = oracle()->BinaryType(expr, TypeFeedbackOracle::RESULT);
TypeInfo info = oracle()->BinaryType(expr);
// If we hit an uninitialized binary op stub we will get type info
// for a smi operation. If one of the operands is a constant string
// do not generate code assuming it is a smi operation.
@ -4952,7 +4964,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HValue* left = Pop();
Token::Value op = expr->op();
TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
TypeInfo info = oracle()->CompareType(expr);
HInstruction* instr = NULL;
if (op == Token::INSTANCEOF) {
// Check to see if the rhs of the instanceof is a global function not

2
deps/v8/src/hydrogen.h

@ -297,7 +297,7 @@ class HGraph: public HSubgraph {
CompilationInfo* info() const { return info_; }
bool AllowAggressiveOptimizations() const;
bool AllowCodeMotion() const;
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }

111
deps/v8/src/ia32/code-stubs-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -91,7 +91,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
__ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
@ -100,7 +101,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(slots_)));
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
@ -118,7 +119,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Initialize the rest of the slots to undefined.
__ mov(ebx, Factory::undefined_value());
for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
@ -1772,40 +1773,11 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If one of the arguments is a string, call the string add stub.
// Otherwise, transition to the generic TRBinaryOpIC type.
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
// Test if left operand is a string.
NearLabel left_not_string;
__ test(left, Immediate(kSmiTagMask));
__ j(zero, &left_not_string);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
__ bind(&left_not_string);
__ test(right, Immediate(kSmiTagMask));
__ j(zero, &call_runtime);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
// Neither argument is a string.
__ bind(&call_runtime);
// Try to add arguments as strings, otherwise, transition to the generic
// TRBinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
@ -2346,36 +2318,8 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&call_runtime);
switch (op_) {
case Token::ADD: {
GenerateAddStrings(masm);
GenerateRegisterArgsPush(masm);
// Test for string arguments before calling runtime.
// Registers containing left and right operands respectively.
Register lhs, rhs;
lhs = edx;
rhs = eax;
// Test if left operand is a string.
NearLabel lhs_not_string;
__ test(lhs, Immediate(kSmiTagMask));
__ j(zero, &lhs_not_string);
__ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &lhs_not_string);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
__ TailCallStub(&string_add_left_stub);
NearLabel call_add_runtime;
// Left operand is not a string, test right.
__ bind(&lhs_not_string);
__ test(rhs, Immediate(kSmiTagMask));
__ j(zero, &call_add_runtime);
__ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_add_runtime);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
__ TailCallStub(&string_add_right_stub);
// Neither argument is a string.
__ bind(&call_add_runtime);
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
}
@ -2418,6 +2362,40 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
NearLabel call_runtime;
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
// Test if left operand is a string.
NearLabel left_not_string;
__ test(left, Immediate(kSmiTagMask));
__ j(zero, &left_not_string);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &left_not_string);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
__ bind(&left_not_string);
__ test(right, Immediate(kSmiTagMask));
__ j(zero, &call_runtime);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
__ j(above_equal, &call_runtime);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
// Neither argument is a string.
__ bind(&call_runtime);
}
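GenerateAddStrings now holds the string fast path that GenerateStringStub and GenerateGeneric previously duplicated. Its branch structure, stripped of assembler detail (names invented):

enum AddPath { kLeftStringStub, kRightStringStub, kFallThrough };

// Try the left operand first, then the right; if neither is a string the
// caller proceeds to its own fallback (type transition or runtime call).
AddPath ChooseAddPath(bool left_is_string, bool right_is_string) {
  if (left_is_string) return kLeftStringStub;
  if (right_is_string) return kRightStringStub;
  return kFallThrough;
}

int main() { return ChooseAddPath(false, true) == kRightStringStub ? 0 : 1; }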
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
@ -4660,8 +4638,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope,
int /* alignment_skew */) {
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)

1
deps/v8/src/ia32/code-stubs-ia32.h

@ -308,6 +308,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);

12
deps/v8/src/ia32/codegen-ia32.cc

@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots();
// Allocate the local context if needed.
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@ -8230,19 +8230,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to look up the context holding the named
// Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(variable->name()));
Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
ASSERT(context.is_register());
frame_->EmitPush(context.reg());
context.Unuse();
frame_->EmitPush(Immediate(variable->name()));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 2);
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}

48
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -106,8 +106,22 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
// Iterate the unoptimized code and patch every stack check except at
// the function entry. This code assumes the function entry stack
// check appears first, i.e., is not deferred or otherwise reordered.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
bool first = true;
for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
!it.done();
it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->target_address() == Code::cast(check_code)->entry()) {
if (first) {
first = false;
} else {
// The stack check code matches the pattern:
//
// cmp esp, <limit>
@ -132,9 +146,20 @@ void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
*(call_target_address - 2) = 0x90; // nop
rinfo->set_target_address(replacement_code->entry());
}
}
}
}
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
// Iterate the unoptimized code and revert all the patched stack checks.
for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
!it.done();
it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->target_address() == replacement_code->entry()) {
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
Address call_target_address = rinfo->pc();
@ -145,6 +170,8 @@ void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
*(call_target_address - 2) = 0x07; // offset
rinfo->set_target_address(check_code->entry());
}
}
}
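Both routines now take the whole unoptimized code object and walk its relocation entries, rather than receiving a single RelocInfo. A toy model of the patch loop over plain containers (not V8's RelocIterator), including the rule that the first, function-entry check stays untouched:

#include <vector>

struct CallSite { const void* target; };

void PatchStackChecks(std::vector<CallSite>& code,
                      const void* check, const void* replacement) {
  bool first = true;
  for (CallSite& site : code) {
    if (site.target != check) continue;
    if (first) { first = false; continue; }  // skip the entry check
    site.target = replacement;
  }
}

int main() {
  int check = 0, repl = 0;
  std::vector<CallSite> code = {{&check}, {&check}, {&check}};
  PatchStackChecks(code, &check, &repl);
  return (code[0].target == &check && code[2].target == &repl) ? 0 : 1;
}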
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
@ -507,26 +534,25 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kIntSize) + FrameDescription::registers_offset();
__ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize));
__ mov(Operand(ebx, offset), ecx);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(ebx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
int src_offset = i * kDoubleSize;
__ movdbl(xmm0, Operand(esp, src_offset));
__ movdbl(Operand(ebx, dst_offset), xmm0);
}
// Remove the bailout id and the general purpose registers from the stack.
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
__ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize));
__ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else {
__ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize));
__ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
@ -591,7 +617,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kIntSize) + FrameDescription::registers_offset();
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ push(Operand(ebx, offset));
}
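The input-register fill loop above flips direction and pops straight into the frame description, with offsets measured in kPointerSize rather than kIntSize. The ordering in isolation: values pushed for indices 0..N-1 must be popped back into N-1..0. A sketch:

#include <array>
#include <vector>

constexpr int kNumberOfRegisters = 8;

std::array<int, kNumberOfRegisters> FillRegisters(std::vector<int> stack) {
  std::array<int, kNumberOfRegisters> regs{};
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    regs[i] = stack.back();  // "pop" directly into slot i
    stack.pop_back();
  }
  return regs;
}

int main() {
  std::vector<int> stack;
  for (int i = 0; i < kNumberOfRegisters; i++) stack.push_back(i);
  return FillRegisters(stack)[3] == 3 ? 0 : 1;
}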

15
deps/v8/src/ia32/full-codegen-ia32.cc

@ -142,7 +142,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@ -764,6 +764,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
clause->body_target()->entry_label()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
default_clause = clause;
@ -3689,19 +3691,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else if (var->is_global()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else {
// Non-global variable. Call the runtime to look up the context
// where the variable was introduced.
// Non-global variable. Call the runtime to delete from the
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kLookupContext, 2);
__ push(eax);
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
}
break;
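The lookup-then-delete pair collapses into one Runtime::kDeleteContextSlot call that both finds the introducing context and removes the slot. A toy model of what such a runtime function does (a context chain as maps; the real semantics carry more detail):

#include <map>
#include <string>
#include <vector>

using Context = std::map<std::string, int>;

// Walk from innermost to outermost; delete where the name was introduced.
bool DeleteContextSlot(std::vector<Context>& chain, const std::string& name) {
  for (auto it = chain.rbegin(); it != chain.rend(); ++it) {
    if (it->erase(name) > 0) return true;
  }
  return false;
}

int main() {
  std::vector<Context> chain = {{{"x", 1}}, {{"y", 2}}};  // outer, inner
  return DeleteContextSlot(chain, "y") && chain[1].empty() ? 0 : 1;
}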

6
deps/v8/src/ia32/ic-ia32.cc

@ -388,7 +388,8 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm,
bool support_wrappers) {
// ----------- S t a t e -------------
// -- eax : receiver
// -- ecx : name
@ -396,7 +397,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}

49
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -566,37 +566,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
deoptimization_index);
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
if (kind & Safepoint::kWithRegisters) {
// Register esi always contains a pointer to the context.
safepoint.DefinePointerRegister(esi);
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
int deoptimization_index) {
RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint =
safepoints_.DefineSafepointWithRegisters(
masm(), arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister()) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
// Register esi always contains a pointer to the context.
safepoint.DefinePointerRegister(esi);
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
deoptimization_index);
}
@ -1908,7 +1911,19 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0));
__ mov(Operand::Cell(instr->hydrogen()->cell()), value);
Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted. We deoptimize in that case.
if (instr->hydrogen()->check_hole_value()) {
__ cmp(cell_operand, Factory::the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Store the value.
__ mov(cell_operand, value);
}
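The fast store is now guarded: a cell holding the hole means the global was deleted, and a plain store would skip the property-details bookkeeping, so the code deoptimizes instead (hydrogen.cc above computes check_hole as !IsDontDelete() || IsReadOnly()). A model using an empty optional as the hole (illustrative types only):

#include <optional>

// Returns false ("deoptimize") when a hole check is required and the cell
// holds the hole; otherwise performs the plain store.
bool TryFastStoreGlobal(std::optional<int>& cell, int value,
                        bool check_hole_value) {
  if (check_hole_value && !cell.has_value()) return false;
  cell = value;
  return true;
}

int main() {
  std::optional<int> cell;  // deleted global: the hole
  return TryFastStoreGlobal(cell, 7, /*check_hole_value=*/true) ? 1 : 0;
}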

4
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -198,6 +198,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,

11
deps/v8/src/ia32/lithium-gap-resolver-ia32.cc

@ -32,12 +32,11 @@ namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32), spilled_register_(-1) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
source_uses_[i] = 0;
destination_uses_[i] = 0;
}
}
: cgen_(owner),
moves_(32),
source_uses_(),
destination_uses_(),
spilled_register_(-1) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
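The constructor loop could be dropped because listing an array member with empty parentheses in the initializer list value-initializes it, zeroing every element. A minimal demonstration:

#include <cassert>

struct Resolver {
  int source_uses_[8];
  Resolver() : source_uses_() {}  // all eight counters start at zero
};

int main() {
  Resolver r;
  for (int u : r.source_uses_) assert(u == 0);
  return 0;
}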

7
deps/v8/src/ia32/lithium-ia32.cc

@ -1343,8 +1343,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@ -1645,7 +1645,8 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
return new LStoreGlobal(UseRegisterAtStart(instr->value()));
LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
return instr->check_hole_value() ? AssignEnvironment(result) : result;
}

6
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -339,7 +339,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
sub(Operand(esp), Immediate(space));
int offset = -2 * kPointerSize;
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
@ -382,7 +382,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all XMM registers.
if (save_doubles) {
CpuFeatures::Scope scope(SSE2);
int offset = -2 * kPointerSize;
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));

115
deps/v8/src/ia32/stub-cache-ia32.cc

@ -327,17 +327,20 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
Label* miss,
bool support_wrappers) {
Label check_wrapper;
// Check if the object is a string leaving the instance type in the
// scratch register.
GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
GenerateStringCheck(masm, receiver, scratch1, miss,
support_wrappers ? &check_wrapper : miss);
// Load length from the string and convert to a smi.
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ ret(0);
if (support_wrappers) {
// Check if the object is a JSValue wrapper.
__ bind(&check_wrapper);
__ cmp(scratch1, JS_VALUE_TYPE);
@ -350,6 +353,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
__ ret(0);
}
}
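Wrapper support is now threaded as a bool from the builtin down to the stub generator, so the keyed variant can keep JSValue handling while the new string-only IC skips it. A toy version of the layered lookup (types invented):

#include <optional>
#include <string>

struct Wrapper { std::string value; };  // stand-in for a JSValue wrapper

std::optional<size_t> StringLength(const std::string* s, const Wrapper* w,
                                   bool support_wrappers) {
  if (s) return s->size();
  if (support_wrappers && w) return w->value.size();
  return std::nullopt;  // miss: fall back to the generic IC path
}

int main() {
  Wrapper w{"abc"};
  return StringLength(nullptr, &w, true).value_or(0) == 3 ? 0 : 1;
}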
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
@ -451,10 +455,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
// Generates call to API function.
static bool GenerateFastApiCall(MacroAssembler* masm,
static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
const CallOptimization& optimization,
int argc,
Failure** failure) {
int argc) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : object passing the type check
@ -516,13 +519,8 @@ static bool GenerateFastApiCall(MacroAssembler* masm,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
MaybeObject* result =
masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
if (result->IsFailure()) {
*failure = Failure::cast(result);
return false;
}
return true;
return masm->TryCallApiFunctionAndReturn(&fun,
argc + kFastApiCallArguments + 1);
}
@ -535,7 +533,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
arguments_(arguments),
name_(name) {}
bool Compile(MacroAssembler* masm,
MaybeObject* Compile(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
@ -544,8 +542,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
Label* miss,
Failure** failure) {
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@ -566,8 +563,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
lookup,
name,
optimization,
miss,
failure);
miss);
} else {
CompileRegular(masm,
object,
@ -578,12 +574,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
name,
holder,
miss);
return true;
return Heap::undefined_value(); // Success.
}
}
private:
bool CompileCacheable(MacroAssembler* masm,
MaybeObject* CompileCacheable(MacroAssembler* masm,
JSObject* object,
Register receiver,
Register scratch1,
@ -593,8 +589,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
LookupResult* lookup,
String* name,
const CallOptimization& optimization,
Label* miss_label,
Failure** failure) {
Label* miss_label) {
ASSERT(optimization.is_constant_call());
ASSERT(!lookup->holder()->IsGlobalObject());
@ -656,11 +651,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Invoke function.
if (can_do_fast_api_call) {
bool success = GenerateFastApiCall(masm, optimization,
arguments_.immediate(), failure);
if (!success) {
return false;
}
MaybeObject* result =
GenerateFastApiCall(masm, optimization, arguments_.immediate());
if (result->IsFailure()) return result;
} else {
__ InvokeFunction(optimization.constant_function(), arguments_,
JUMP_FUNCTION);
@ -679,7 +672,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
FreeSpaceForFastApiCall(masm, scratch1);
}
return true;
return Heap::undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@ -1057,7 +1050,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
}
bool StubCompiler::GenerateLoadCallback(JSObject* object,
MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@ -1066,8 +1059,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure) {
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
@ -1122,13 +1114,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
if (result->IsFailure()) {
*failure = Failure::cast(result);
return false;
}
return true;
return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
}
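The recurring edit in this file replaces the bool return plus Failure** out-parameter with a single MaybeObject* that encodes success or failure in one value. The shape of that refactor in portable C++ terms (types invented; V8 itself uses a tagged pointer, not std::variant):

#include <string>
#include <variant>

struct Failure { std::string reason; };
using MaybeObject = std::variant<int, Failure>;  // invented stand-in

bool IsFailure(const MaybeObject& m) {
  return std::holds_alternative<Failure>(m);
}

MaybeObject Compile(bool ok) {
  if (!ok) return Failure{"retry after GC"};
  return 42;  // plays the role of `Heap::undefined_value(); // Success.`
}

int main() { return IsFailure(Compile(true)) ? 1 : 0; }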
@ -2280,17 +2266,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
if (depth != kInvalidProtoDepth) {
Failure* failure;
// Move the return address on top of the stack.
__ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
// esp[2 * kPointerSize] is uninitialized; esp[3 * kPointerSize] contains a
// duplicate of the return address and will be overwritten.
bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
if (!success) {
return failure;
}
MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
if (result->IsFailure()) return result;
} else {
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
}
@ -2335,8 +2318,7 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
CallInterceptorCompiler compiler(this, arguments(), ecx);
Failure* failure;
bool success = compiler.Compile(masm(),
MaybeObject* result = compiler.Compile(masm(),
object,
holder,
name,
@ -2345,11 +2327,8 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
ebx,
edi,
eax,
&miss,
&failure);
if (!success) {
return failure;
}
&miss);
if (result->IsFailure()) return result;
// Restore receiver.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@ -2603,14 +2582,24 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
// Store the value in the cell.
// Compute the cell operand to use.
Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
if (Serializer::enabled()) {
__ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
} else {
__ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset);
}
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
__ cmp(cell_operand, Factory::the_hole_value());
__ j(equal, &miss);
// Store the value in the cell.
__ mov(cell_operand, eax);
// Return the value (register eax).
__ IncrementCounter(&Counters::named_store_global_inline, 1);
__ ret(0);
@ -2799,12 +2788,11 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
callback, name, &miss, &failure);
if (!success) {
MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
edi, callback, name, &miss);
if (result->IsFailure()) {
miss.Unuse();
return failure;
return result;
}
__ bind(&miss);
@ -2968,12 +2956,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
callback, name, &miss, &failure);
if (!success) {
MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
ecx, edi, callback, name, &miss);
if (result->IsFailure()) {
miss.Unuse();
return failure;
return result;
}
__ bind(&miss);
@ -3089,7 +3076,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

47
deps/v8/src/ic.cc

@ -822,6 +822,9 @@ MaybeObject* LoadIC::Load(State state,
}
if (FLAG_use_ic) {
Code* non_monomorphic_stub =
(state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
// Use specialized code for getting the length of strings and
// string wrapper objects. The length property of string wrapper
// objects is read-only and therefore always returns the length of
@ -829,22 +832,27 @@ MaybeObject* LoadIC::Load(State state,
if ((object->IsString() || object->IsStringWrapper()) &&
name->Equals(Heap::length_symbol())) {
HandleScope scope;
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
object = Handle<Object>(Handle<JSValue>::cast(object)->value());
}
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
#endif
Map* map = HeapObject::cast(*object)->map();
if (state == PREMONOMORPHIC) {
if (object->IsString()) {
Map* map = HeapObject::cast(*object)->map();
const int offset = String::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
set_target(Builtins::builtin(Builtins::LoadIC_StringLength));
} else {
set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
}
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
} else {
set_target(non_monomorphic_stub);
}
// Get the string if we have a string wrapper object.
if (object->IsJSValue()) {
object = Handle<Object>(Handle<JSValue>::cast(object)->value());
}
Code* target = NULL;
target = Builtins::builtin(Builtins::LoadIC_StringLength);
set_target(target);
return Smi::FromInt(String::cast(*object)->length());
}
@ -853,12 +861,14 @@ MaybeObject* LoadIC::Load(State state,
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
if (state == PREMONOMORPHIC) {
Map* map = HeapObject::cast(*object)->map();
const int offset = JSArray::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
set_target(target);
set_target(Builtins::builtin(Builtins::LoadIC_ArrayLength));
} else {
set_target(non_monomorphic_stub);
}
return JSArray::cast(*object)->length();
}
@ -868,8 +878,11 @@ MaybeObject* LoadIC::Load(State state,
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
#endif
Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
set_target(target);
if (state == PREMONOMORPHIC) {
set_target(Builtins::builtin(Builtins::LoadIC_FunctionPrototype));
} else {
set_target(non_monomorphic_stub);
}
return Accessors::FunctionGetPrototype(*object, 0);
}
}
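
The length/prototype fast paths above now consult the IC state before committing to a specialized stub: a PREMONOMORPHIC site installs the specialized builtin, while any other state falls back to the non-monomorphic stub chosen at the top of Load. A compressed sketch of that dispatch, with hypothetical names standing in for the builtins:

#include <cstdio>

// Hypothetical IC states and stub identifiers mirroring the dispatch above.
enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };
enum Stub { PRE_MONOMORPHIC_STUB, MEGAMORPHIC_STUB, ARRAY_LENGTH_STUB };

Stub SelectArrayLengthStub(State state) {
  // Only a premonomorphic site is specialized; everything else takes
  // the generic stub so the IC does not ping-pong between handlers.
  if (state == PREMONOMORPHIC) return ARRAY_LENGTH_STUB;
  return state == UNINITIALIZED ? PRE_MONOMORPHIC_STUB : MEGAMORPHIC_STUB;
}

int main() {
  printf("%d\n", SelectArrayLengthStub(PREMONOMORPHIC));  // 2: specialized
  printf("%d\n", SelectArrayLengthStub(MEGAMORPHIC));     // 1: generic
}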
@ -1092,6 +1105,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
}
if (FLAG_use_ic) {
// TODO(1073): don't ignore the current stub state.
// Use specialized code for getting the length of strings.
if (object->IsString() && name->Equals(Heap::length_symbol())) {
Handle<String> string = Handle<String>::cast(object);
@ -2098,8 +2113,6 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
if (!code.is_null()) {
TRBinaryOpIC ic;
ic.patch(*code);
if (FLAG_trace_ic) {
PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
TRBinaryOpIC::GetName(previous_type),
@ -2107,6 +2120,8 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
TRBinaryOpIC::GetName(result_type),
Token::Name(op));
}
TRBinaryOpIC ic;
ic.patch(*code);
// Activate inlined smi code.
if (previous_type == TRBinaryOpIC::UNINITIALIZED) {

3
deps/v8/src/ic.h

@ -284,7 +284,8 @@ class LoadIC: public IC {
// Specialized code generator routines.
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateStringLength(MacroAssembler* masm);
static void GenerateStringLength(MacroAssembler* masm,
bool support_wrappers);
static void GenerateFunctionPrototype(MacroAssembler* masm);
// Clear the use of the inlined version.

10
deps/v8/src/mark-compact.cc

@ -33,6 +33,7 @@
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"
@ -1660,6 +1661,7 @@ inline void EncodeForwardingAddressesInRange(Address start,
free_start = current;
is_prev_alive = false;
}
LiveObjectList::ProcessNonLive(object);
}
}
@ -1880,6 +1882,9 @@ static void SweepNewSpace(NewSpace* space) {
size,
false);
} else {
// Process the dead object before we write a NULL into its header.
LiveObjectList::ProcessNonLive(object);
size = object->Size();
Memory::Address_at(current) = NULL;
}
@ -1899,6 +1904,7 @@ static void SweepNewSpace(NewSpace* space) {
// Update roots.
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
LiveObjectList::IterateElements(&updating_visitor);
// Update pointers in old spaces.
Heap::IterateDirtyRegions(Heap::old_pointer_space(),
@ -1986,6 +1992,7 @@ static void SweepSpace(PagedSpace* space) {
free_start = current;
is_previous_alive = false;
}
LiveObjectList::ProcessNonLive(object);
}
// The object is now unmarked for the call to Size() at the top of the
// loop.
@ -2164,6 +2171,7 @@ class MapCompact {
void UpdateMapPointersInRoots() {
Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
LiveObjectList::IterateElements(&map_updating_visitor_);
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
@ -2533,6 +2541,8 @@ void MarkCompactCollector::UpdatePointers() {
// Update the pointer to the head of the weak list of global contexts.
updating_visitor.VisitPointer(&Heap::global_contexts_list_);
LiveObjectList::IterateElements(&updating_visitor);
int live_maps_size = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),

36
deps/v8/src/messages.js

@ -82,20 +82,39 @@ function FormatString(format, args) {
var result = format;
for (var i = 0; i < args.length; i++) {
var str;
try { str = ToDetailString(args[i]); }
catch (e) { str = "#<error>"; }
try {
str = ToDetailString(args[i]);
} catch (e) {
str = "#<error>";
}
result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
}
return result;
}
// To check if something is a native error we need to check the
// concrete native error types. It is not enough to check "obj
// instanceof $Error" because user code can replace
// NativeError.prototype.__proto__. User code cannot replace
// NativeError.prototype though and therefore this is a safe test.
function IsNativeErrorObject(obj) {
return (obj instanceof $Error) ||
(obj instanceof $EvalError) ||
(obj instanceof $RangeError) ||
(obj instanceof $ReferenceError) ||
(obj instanceof $SyntaxError) ||
(obj instanceof $TypeError) ||
(obj instanceof $URIError);
}
// When formatting internally created error messages, do not
// invoke overwritten error toString methods but explicitly use
// the built-in error toString method. This avoids leaking error
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
if (obj instanceof $Error) {
if (IsNativeErrorObject(obj)) {
return %_CallFunction(obj, errorToString);
} else {
return ToString(obj);
@ -108,7 +127,9 @@ function ToDetailString(obj) {
var constructor = obj.constructor;
if (!constructor) return ToStringCheckErrorObject(obj);
var constructorName = constructor.name;
if (!constructorName) return ToStringCheckErrorObject(obj);
if (!constructorName || !IS_STRING(constructorName)) {
return ToStringCheckErrorObject(obj);
}
return "#<" + GetInstanceName(constructorName) + ">";
} else {
return ToStringCheckErrorObject(obj);
@ -216,6 +237,13 @@ function FormatMessage(message) {
strict_param_dupe: "Strict mode function may not have duplicate parameter names",
strict_var_name: "Variable name may not be eval or arguments in strict mode",
strict_function_name: "Function name may not be eval or arguments in strict mode",
strict_octal_literal: "Octal literals are not allowed in strict mode.",
strict_duplicate_property: "Duplicate data property in object literal not allowed in strict mode",
accessor_data_property: "Object literal may not have data and accessor property with the same name",
accessor_get_set: "Object literal may not have multiple get/set accessors with the same name",
strict_lhs_eval_assignment: "Assignment to eval or arguments is not allowed in strict mode",
strict_lhs_postfix: "Postfix increment/decrement may not have eval or arguments operand in strict mode",
strict_lhs_prefix: "Prefix increment/decrement may not have eval or arguments operand in strict mode",
};
}
var format = kMessages[message.type];

3
deps/v8/src/objects-inl.h

@ -57,8 +57,7 @@ Smi* PropertyDetails::AsSmi() {
PropertyDetails PropertyDetails::AsDeleted() {
PropertyDetails d(DONT_ENUM, NORMAL);
Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
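
PropertyDetails packs attributes, type, and the deleted marker into one Smi-sized integer via BitField encoders, which is why AsDeleted can simply OR the deleted bit into the raw value_ instead of round-tripping through AsSmi(). A minimal sketch of the encoding, assuming a BitField-style helper like V8's (field layout here is illustrative, not V8's actual one):

#include <cassert>
#include <stdint.h>

// Simplified BitField in the style of V8's: a value of type T stored in
// `size` bits starting at bit `shift`.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t v) { return static_cast<T>((v & kMask) >> shift); }
};

typedef BitField<int, 0, 3>      AttributesField;  // e.g. DONT_ENUM bits
typedef BitField<unsigned, 3, 3> TypeField;
typedef BitField<unsigned, 6, 1> DeletedField;

int main() {
  uint32_t value = AttributesField::encode(4) | TypeField::encode(0);
  // Marking as deleted only flips one bit; other fields are preserved.
  uint32_t deleted = value | DeletedField::encode(1);
  assert(DeletedField::decode(deleted) == 1);
  assert(AttributesField::decode(deleted) == 4);
}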

211
deps/v8/src/parser.cc

@ -664,7 +664,11 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
TemporaryScope temp_scope(&this->temp_scope_);
ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
bool ok = true;
int beg_loc = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, &ok);
if (ok && temp_scope_->StrictMode()) {
CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
}
if (ok) {
result = new FunctionLiteral(
no_name,
@ -2288,6 +2292,11 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
expression = NewThrowReferenceError(type);
}
if (temp_scope_->StrictMode()) {
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
}
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@ -2514,6 +2523,12 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
expression = NewThrowReferenceError(type);
}
if (temp_scope_->StrictMode()) {
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
int position = scanner().location().beg_pos;
IncrementOperation* increment = new IncrementOperation(op, expression);
return new CountOperation(true /* prefix */, increment, position);
@ -2539,6 +2554,12 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
expression = NewThrowReferenceError(type);
}
if (temp_scope_->StrictMode()) {
// Postfix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_postfix", CHECK_OK);
}
Token::Value next = Next();
int position = scanner().location().beg_pos;
IncrementOperation* increment = new IncrementOperation(next, expression);
@ -3012,6 +3033,126 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
return Factory::undefined_value();
}
// Defined in ast.cc
bool IsEqualString(void* first, void* second);
bool IsEqualSmi(void* first, void* second);
// Validation per 11.1.5 Object Initialiser
class ObjectLiteralPropertyChecker {
public:
ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
props(&IsEqualString),
elems(&IsEqualSmi),
parser_(parser),
strict_(strict) {
}
void CheckProperty(
ObjectLiteral::Property* property,
Scanner::Location loc,
bool* ok);
private:
enum PropertyKind {
kGetAccessor = 0x01,
kSetAccessor = 0x02,
kAccessor = kGetAccessor | kSetAccessor,
kData = 0x04
};
static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
switch (property->kind()) {
case ObjectLiteral::Property::GETTER:
return kGetAccessor;
case ObjectLiteral::Property::SETTER:
return kSetAccessor;
default:
return kData;
}
}
HashMap props;
HashMap elems;
Parser* parser_;
bool strict_;
};
void ObjectLiteralPropertyChecker::CheckProperty(
ObjectLiteral::Property* property,
Scanner::Location loc,
bool* ok) {
ASSERT(property != NULL);
Literal* lit = property->key();
Handle<Object> handle = lit->handle();
uint32_t hash;
HashMap* map;
void* key;
Smi* smi_key_location;
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
smi_key_location = Smi::FromInt(hash);
key = &smi_key_location;
map = &elems;
} else {
key = handle.location();
hash = name->Hash();
map = &props;
}
} else if (handle->ToArrayIndex(&hash)) {
key = handle.location();
map = &elems;
} else {
ASSERT(handle->IsNumber());
double num = handle->Number();
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
map = &props;
}
// Lookup property previously defined, if any.
HashMap::Entry* entry = map->Lookup(key, hash, true);
intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
intptr_t curr = GetPropertyKind(property);
// Duplicate data properties are illegal in strict mode.
if (strict_ && (curr & prev & kData) != 0) {
parser_->ReportMessageAt(loc, "strict_duplicate_property",
Vector<const char*>::empty());
*ok = false;
return;
}
// Data property conflicting with an accessor.
if (((curr & kData) && (prev & kAccessor)) ||
((prev & kData) && (curr & kAccessor))) {
parser_->ReportMessageAt(loc, "accessor_data_property",
Vector<const char*>::empty());
*ok = false;
return;
}
// Two accessors of the same type conflict.
if ((curr & prev & kAccessor) != 0) {
parser_->ReportMessageAt(loc, "accessor_get_set",
Vector<const char*>::empty());
*ok = false;
return;
}
// Update map
entry->value = reinterpret_cast<void*> (prev | curr);
*ok = true;
}
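
The checker reduces every property to a small bitmask (kData, kGetAccessor, kSetAccessor) and folds the masks of same-named properties together, so each rule in 11.1.5 becomes a single bit test. A standalone sketch of just that logic (hash-map plumbing omitted):

#include <cassert>

enum PropertyKind {
  kGetAccessor = 0x01,
  kSetAccessor = 0x02,
  kAccessor    = kGetAccessor | kSetAccessor,
  kData        = 0x04
};

// Returns true if adding `curr` after previously seen kinds `prev`
// (for the same property name) violates the rules enforced above.
bool Conflicts(int prev, int curr, bool strict) {
  if (strict && (curr & prev & kData) != 0) return true;   // duplicate data
  if (((curr & kData) && (prev & kAccessor)) ||
      ((prev & kData) && (curr & kAccessor))) return true; // data vs accessor
  if ((curr & prev & kAccessor) != 0) return true;         // get/get or set/set
  return false;
}

int main() {
  assert(!Conflicts(kGetAccessor, kSetAccessor, true));  // get + set: fine
  assert(Conflicts(kGetAccessor, kGetAccessor, false));  // two getters: always bad
  assert(Conflicts(kData, kData, true));                 // dup data: strict only
  assert(!Conflicts(kData, kData, false));
}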
void Parser::BuildObjectLiteralConstantProperties(
ZoneList<ObjectLiteral::Property*>* properties,
@ -3117,12 +3258,20 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
new ZoneList<ObjectLiteral::Property*>(4);
int number_of_boilerplate_properties = 0;
ObjectLiteralPropertyChecker checker(this, temp_scope_->StrictMode());
Expect(Token::LBRACE, CHECK_OK);
Scanner::Location loc = scanner().location();
while (peek() != Token::RBRACE) {
if (fni_ != NULL) fni_->Enter();
Literal* key = NULL;
Token::Value next = peek();
// Location of the property name token
Scanner::Location loc = scanner().peek_location();
switch (next) {
case Token::IDENTIFIER: {
bool is_getter = false;
@ -3132,11 +3281,15 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
if (fni_ != NULL) fni_->PushLiteralName(id);
if ((is_getter || is_setter) && peek() != Token::COLON) {
// Update loc to point to the identifier
loc = scanner().peek_location();
ObjectLiteral::Property* property =
ParseObjectLiteralGetSet(is_getter, CHECK_OK);
if (IsBoilerplateProperty(property)) {
number_of_boilerplate_properties++;
}
// Validate the property.
checker.CheckProperty(property, loc, CHECK_OK);
properties->Add(property);
if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
@ -3193,6 +3346,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
// Count CONSTANT or COMPUTED properties to maintain the enumeration order.
if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;
// Validate the property
checker.CheckProperty(property, loc, CHECK_OK);
properties->Add(property);
// TODO(1240767): Consider allowing trailing comma.
@ -3204,6 +3359,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
}
}
Expect(Token::RBRACE, CHECK_OK);
// Computation of literal_index must happen before pre parse bailout.
int literal_index = temp_scope_->NextMaterializedLiteralIndex();
@ -3296,10 +3452,21 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
int start_pos = scanner().location().beg_pos;
Scanner::Location name_loc = Scanner::NoLocation();
Scanner::Location dupe_loc = Scanner::NoLocation();
bool done = (peek() == Token::RPAREN);
while (!done) {
Handle<String> param_name = ParseIdentifier(CHECK_OK);
// Store locations for possible future error reports.
if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
name_loc = scanner().location();
}
if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
dupe_loc = scanner().location();
}
Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
top_scope_->AddParameter(parameter);
num_parameters++;
@ -3384,7 +3551,19 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
*ok = false;
return NULL;
}
// TODO(mmaly): Check for octal escape sequence here.
if (name_loc.IsValid()) {
ReportMessageAt(name_loc, "strict_param_name",
Vector<const char*>::empty());
*ok = false;
return NULL;
}
if (dupe_loc.IsValid()) {
ReportMessageAt(dupe_loc, "strict_param_dupe",
Vector<const char*>::empty());
*ok = false;
return NULL;
}
CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
}
FunctionLiteral* function_literal =
@ -3531,6 +3710,36 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
}
// Checks LHS expression for assignment and prefix/postfix increment/decrement
// in strict mode.
void Parser::CheckStrictModeLValue(Expression* expression,
const char* error,
bool* ok) {
ASSERT(temp_scope_->StrictMode());
VariableProxy* lhs = expression != NULL
? expression->AsVariableProxy()
: NULL;
if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
ReportMessage(error, Vector<const char*>::empty());
*ok = false;
}
}
// Checks whether the last octal literal seen lies between beg_pos and end_pos.
// If so, reports an error.
void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
int octal = scanner().octal_position();
if (beg_pos <= octal && octal <= end_pos) {
ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
Vector<const char*>::empty());
scanner().clear_octal_position();
*ok = false;
}
}
// This function reads an identifier and determines whether or not it
// is 'get' or 'set'. The reason for not using ParseIdentifier and
// checking on the output is that this involves heap allocation which

8
deps/v8/src/parser.h

@ -613,6 +613,14 @@ class Parser {
bool* is_set,
bool* ok);
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
const char* error,
bool* ok);
// Strict mode octal literal validation.
void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
// Parser support
VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
FunctionLiteral* fun,

2
deps/v8/src/platform-freebsd.cc

@ -430,7 +430,7 @@ Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
set_names(name);
set_name(name);
}

19
deps/v8/src/runtime-profiler.cc

@ -193,22 +193,9 @@ static void AttemptOnStackReplacement(JSFunction* function) {
if (maybe_check_code->ToObject(&check_code)) {
Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
Code* unoptimized_code = shared->code();
// Iterate the unoptimized code and patch every stack check except at
// the function entry. This code assumes the function entry stack
// check appears first i.e., is not deferred or otherwise reordered.
bool first = true;
for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
!it.done();
it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->target_address() == Code::cast(check_code)->entry()) {
if (first) {
first = false;
} else {
Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
}
}
}
Deoptimizer::PatchStackCheckCode(unoptimized_code,
Code::cast(check_code),
replacement_code);
}
}

49
deps/v8/src/runtime.cc

@ -6944,15 +6944,9 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
Handle<Code> check_code = check_stub.GetCode();
Handle<Code> replacement_code(
Builtins::builtin(Builtins::OnStackReplacement));
// Iterate the unoptimized code and revert all the patched stack checks.
for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
!it.done();
it.next()) {
RelocInfo* rinfo = it.rinfo();
if (rinfo->target_address() == replacement_code->entry()) {
Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
}
}
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
*replacement_code);
// Allow OSR only at nesting level zero again.
unoptimized->set_allow_osr_at_loop_nesting_level(0);
@ -7049,7 +7043,7 @@ static MaybeObject* Runtime_PushCatchContext(Arguments args) {
}
static MaybeObject* Runtime_LookupContext(Arguments args) {
static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
@ -7059,16 +7053,31 @@ static MaybeObject* Runtime_LookupContext(Arguments args) {
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes);
Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
if (index < 0 && !holder.is_null()) {
ASSERT(holder->IsJSObject());
return *holder;
// If the slot was not found the result is true.
if (holder.is_null()) {
return Heap::true_value();
}
// No intermediate context found. Use global object by default.
return Top::context()->global();
// If the slot was found in a context, it should be DONT_DELETE.
if (holder->IsContext()) {
return Heap::false_value();
}
// The slot was found in a JSObject, either a context extension object,
// the global object, or an arguments object. Try to delete it
// (respecting DONT_DELETE). For consistency with V8's usual behavior,
// which allows deleting all parameters in functions that mention
// 'arguments', we do this even for the case of slots found on an
// arguments object. The slot was found on an arguments object if the
// index is non-negative.
Handle<JSObject> object = Handle<JSObject>::cast(holder);
if (index >= 0) {
return object->DeleteElement(index, JSObject::NORMAL_DELETION);
} else {
return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
}
}
@ -7141,8 +7150,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes);
Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
// If the index is non-negative, the slot has been found in a local
// variable or a parameter. Read it from the context object or the
@ -7209,8 +7217,7 @@ static MaybeObject* Runtime_StoreContextSlot(Arguments args) {
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
Handle<Object> holder =
context->Lookup(name, flags, &index, &attributes);
Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
if (index >= 0) {
if (holder->IsContext()) {

2
deps/v8/src/runtime.h

@ -284,7 +284,7 @@ namespace internal {
F(NewContext, 1, 1) \
F(PushContext, 1, 1) \
F(PushCatchContext, 1, 1) \
F(LookupContext, 2, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
F(StoreContextSlot, 3, 1) \
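
For context, entries in this table follow V8's F(name, number_of_args, result_size) shape: the macro is expanded once to declare the Runtime_* functions and again to build the dispatch table, which is why renaming LookupContext to DeleteContextSlot touches only this one line plus the implementation. A minimal sketch of the X-macro technique, with hypothetical entries:

#include <cstdio>

// Hypothetical runtime entries: F(name, args, result_size).
#define RUNTIME_FUNCTION_LIST(F) \
  F(PushContext, 1, 1)           \
  F(DeleteContextSlot, 2, 1)     \
  F(StoreContextSlot, 3, 1)

// Expansion 1: an enum of function ids.
#define DECLARE_ID(name, args, ressize) k##name,
enum RuntimeId { RUNTIME_FUNCTION_LIST(DECLARE_ID) kNumFunctions };
#undef DECLARE_ID

// Expansion 2: a parallel table of names and arities.
#define DECLARE_ENTRY(name, args, ressize) { #name, args },
struct Entry { const char* name; int nargs; };
static const Entry kEntries[] = { RUNTIME_FUNCTION_LIST(DECLARE_ENTRY) };
#undef DECLARE_ENTRY

int main() {
  printf("%s takes %d args\n", kEntries[kDeleteContextSlot].name,
         kEntries[kDeleteContextSlot].nargs);  // DeleteContextSlot takes 2 args
}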

41
deps/v8/src/safepoint-table.cc

@ -117,55 +117,26 @@ void Safepoint::DefinePointerRegister(Register reg) {
}
Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
Safepoint SafepointTableBuilder::DefineSafepoint(
Assembler* assembler, Safepoint::Kind kind, int arguments,
int deoptimization_index) {
ASSERT(deoptimization_index != -1);
DeoptimizationInfo pc_and_deoptimization_index;
pc_and_deoptimization_index.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
pc_and_deoptimization_index.arguments = 0;
pc_and_deoptimization_index.has_doubles = false;
deoptimization_info_.Add(pc_and_deoptimization_index);
indexes_.Add(new ZoneList<int>(8));
registers_.Add(NULL);
return Safepoint(indexes_.last(), registers_.last());
}
Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
Assembler* assembler, int arguments, int deoptimization_index) {
ASSERT(deoptimization_index != -1);
ASSERT(arguments >= 0);
DeoptimizationInfo pc_and_deoptimization_index;
pc_and_deoptimization_index.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
pc_and_deoptimization_index.arguments = arguments;
pc_and_deoptimization_index.has_doubles = false;
pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
deoptimization_info_.Add(pc_and_deoptimization_index);
indexes_.Add(new ZoneList<int>(8));
registers_.Add(new ZoneList<int>(4));
registers_.Add((kind & Safepoint::kWithRegisters)
? new ZoneList<int>(4)
: NULL);
return Safepoint(indexes_.last(), registers_.last());
}
Safepoint SafepointTableBuilder::DefineSafepointWithRegistersAndDoubles(
Assembler* assembler, int arguments, int deoptimization_index) {
ASSERT(deoptimization_index != -1);
ASSERT(arguments >= 0);
DeoptimizationInfo pc_and_deoptimization_index;
pc_and_deoptimization_index.pc = assembler->pc_offset();
pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
pc_and_deoptimization_index.arguments = arguments;
pc_and_deoptimization_index.has_doubles = true;
deoptimization_info_.Add(pc_and_deoptimization_index);
indexes_.Add(new ZoneList<int>(8));
registers_.Add(new ZoneList<int>(4));
return Safepoint(indexes_.last(), registers_.last());
}
unsigned SafepointTableBuilder::GetCodeOffset() const {
ASSERT(emitted_);
return offset_;

25
deps/v8/src/safepoint-table.h

@ -180,6 +180,13 @@ class SafepointTable BASE_EMBEDDED {
class Safepoint BASE_EMBEDDED {
public:
typedef enum {
kSimple = 0,
kWithRegisters = 1 << 0,
kWithDoubles = 1 << 1,
kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
} Kind;
static const int kNoDeoptimizationIndex =
(1 << (SafepointEntry::kDeoptIndexBits)) - 1;
@ -210,23 +217,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
// Define a new safepoint for the current position in the body.
Safepoint DefineSafepoint(
Assembler* assembler,
int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
// Define a new safepoint with registers on the stack for the
// current position in the body and take the number of arguments on
// top of the registers into account.
Safepoint DefineSafepointWithRegisters(
Assembler* assembler,
int arguments,
int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
// Define a new safepoint with all double registers and the normal
// registers on the stack for the current position in the body and
// take the number of arguments on top of the registers into account.
// TODO(1043) Rewrite the three SafepointTableBuilder::DefineSafepoint
// methods to one method that uses template arguments.
Safepoint DefineSafepointWithRegistersAndDoubles(
Assembler* assembler,
Safepoint::Kind kind,
int arguments,
int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
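
The three DefineSafepoint* entry points collapse into one function parameterized by a Kind bitmask; the flags decide whether a register list is allocated and whether doubles are recorded. A sketch of the consolidation, with simplified types standing in for the builder's zone lists:

#include <cassert>
#include <vector>

enum Kind {
  kSimple                  = 0,
  kWithRegisters           = 1 << 0,
  kWithDoubles             = 1 << 1,
  kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
};

struct Info {
  bool has_doubles;
  std::vector<int>* registers;  // NULL when the safepoint records no registers
};

// One definition replaces three nearly identical ones: the bitmask,
// not the function name, selects what gets recorded.
Info DefineSafepoint(Kind kind) {
  Info info;
  info.has_doubles = (kind & kWithDoubles) != 0;
  info.registers = (kind & kWithRegisters) ? new std::vector<int>() : 0;
  return info;
}

int main() {
  assert(DefineSafepoint(kSimple).registers == 0);
  Info full = DefineSafepoint(kWithRegistersAndDoubles);
  assert(full.has_doubles && full.registers != 0);
  delete full.registers;
}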

10
deps/v8/src/scanner-base.cc

@ -64,7 +64,8 @@ bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
// ----------------------------------------------------------------------------
// Scanner
Scanner::Scanner() { }
Scanner::Scanner()
: octal_pos_(kNoOctalLocation) { }
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@ -98,6 +99,7 @@ uc32 Scanner::ScanHexEscape(uc32 c, int length) {
// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
// ECMA-262. Other JS VMs support them.
uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
octal_pos_ = source_pos() - 1; // Already advanced
uc32 x = c - '0';
for (int i = 0; i < length; i++) {
int d = c0_ - '0';
@ -601,7 +603,11 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
kind = DECIMAL;
break;
}
if (c0_ < '0' || '7' < c0_) break;
if (c0_ < '0' || '7' < c0_) {
// Octal literal finished.
octal_pos_ = next_.location.beg_pos;
break;
}
AddLiteralCharAdvance();
}
}

18
deps/v8/src/scanner-base.h

@ -247,6 +247,9 @@ class LiteralBuffer {
// Generic functionality used by both JSON and JavaScript scanners.
class Scanner {
public:
// -1 is outside of the range of any real source code.
static const int kNoOctalLocation = -1;
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
class LiteralScope {
@ -271,15 +274,28 @@ class Scanner {
struct Location {
Location(int b, int e) : beg_pos(b), end_pos(e) { }
Location() : beg_pos(0), end_pos(0) { }
bool IsValid() const {
return beg_pos >= 0 && end_pos >= beg_pos;
}
int beg_pos;
int end_pos;
};
static Location NoLocation() {
return Location(-1, -1);
}
// Returns the location information for the current token
// (the token returned by Next()).
Location location() const { return current_.location; }
Location peek_location() const { return next_.location; }
// Returns the location of the last seen octal literal
int octal_position() const { return octal_pos_; }
void clear_octal_position() { octal_pos_ = -1; }
// Returns the literal string, if any, for the current token (the
// token returned by Next()). The string is 0-terminated and in
// UTF-8 format; they may contain 0-characters. Literal strings are
@ -410,6 +426,8 @@ class Scanner {
// Input stream. Must be initialized to an UC16CharacterStream.
UC16CharacterStream* source_;
// Start position of the octal literal last scanned.
int octal_pos_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
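
The scanner records the start position of the last octal escape or octal literal it saw; the parser then asks, once per strict-mode region, whether that position falls inside the region. One sticky integer replaces any per-token error plumbing. A sketch of the handshake, with hypothetical scanner/parser stand-ins:

#include <cassert>

static const int kNoOctalLocation = -1;  // outside any real source position

struct Scanner {
  int octal_pos_;
  Scanner() : octal_pos_(kNoOctalLocation) {}
  // Called whenever an octal literal or escape is scanned.
  void RecordOctal(int pos) { octal_pos_ = pos; }
  int octal_position() const { return octal_pos_; }
  void clear_octal_position() { octal_pos_ = kNoOctalLocation; }
};

// Parser side: after a strict-mode region [beg_pos, end_pos] is parsed,
// reject it if the last octal seen lies inside the region.
bool CheckOctalLiteral(Scanner* scanner, int beg_pos, int end_pos) {
  int octal = scanner->octal_position();
  if (beg_pos <= octal && octal <= end_pos) {
    scanner->clear_octal_position();
    return false;  // would report "strict_octal_literal" here
  }
  return true;
}

int main() {
  Scanner s;
  assert(CheckOctalLiteral(&s, 0, 100));   // nothing recorded: ok
  s.RecordOctal(42);
  assert(!CheckOctalLiteral(&s, 0, 100));  // octal inside strict region
  assert(CheckOctalLiteral(&s, 0, 100));   // position was cleared
}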

5
deps/v8/src/scopes.cc

@ -726,7 +726,6 @@ void Scope::ResolveVariable(Scope* global_scope,
// Note that we must do a lookup anyway, because if we find one,
// we must mark that variable as potentially accessed from this
// inner scope (the property may not be in the 'with' object).
if (var != NULL) var->set_is_used(true);
var = NonLocal(proxy->name(), Variable::DYNAMIC);
} else {
@ -834,8 +833,8 @@ bool Scope::MustAllocate(Variable* var) {
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope() ||
scope_calls_eval_ ||
inner_scope_calls_eval_)) {
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_)) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.

11
deps/v8/src/scopes.h

@ -288,6 +288,17 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
// ---------------------------------------------------------------------------
// Strict mode support.
bool IsDeclared(Handle<String> name) {
// During formal parameter list parsing the scope only contains
// two variables inserted at initialization: "this" and "arguments".
// "this" is an invalid parameter name and "arguments" is invalid parameter
// name in strict mode. Therefore looking up with the map which includes
// "this" and "arguments" in addition to all formal parameters is safe.
return variables_.Lookup(name) != NULL;
}
// ---------------------------------------------------------------------------
// Debugging.

4
deps/v8/src/serialize.cc

@ -335,7 +335,7 @@ void ExternalReferenceTable::PopulateTable() {
Add(ExternalReference::delete_handle_scope_extensions().address(),
RUNTIME_ENTRY,
3,
4,
"HandleScope::DeleteExtensions");
// Miscellaneous
@ -504,7 +504,7 @@ void ExternalReferenceTable::PopulateTable() {
"power_double_int_function");
Add(ExternalReference::arguments_marker_location().address(),
UNCLASSIFIED,
40,
41,
"Factory::arguments_marker().location()");
}

3
deps/v8/src/spaces.cc

@ -27,6 +27,7 @@
#include "v8.h"
#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"
@ -3125,6 +3126,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
// Free the chunk.
MarkCompactCollector::ReportDeleteIfNeeded(object);
LiveObjectList::ProcessNonLive(object);
size_ -= static_cast<int>(chunk_size);
objects_size_ -= object->Size();
page_count_--;

8
deps/v8/src/stub-cache.h

@ -427,7 +427,8 @@ class StubCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
Label* miss_label);
Label* miss_label,
bool support_wrappers);
static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
@ -501,7 +502,7 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);
bool GenerateLoadCallback(JSObject* object,
MaybeObject* GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
@ -510,8 +511,7 @@ class StubCompiler BASE_EMBEDDED {
Register scratch3,
AccessorInfo* callback,
String* name,
Label* miss,
Failure** failure);
Label* miss);
void GenerateLoadConstant(JSObject* object,
JSObject* holder,

14
deps/v8/src/top.cc

@ -72,7 +72,7 @@ void ThreadLocalTop::Initialize() {
handler_ = 0;
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
simulator_ = assembler::arm::Simulator::current();
simulator_ = Simulator::current();
#elif V8_TARGET_ARCH_MIPS
simulator_ = assembler::mips::Simulator::current();
#endif
@ -806,7 +806,7 @@ void Top::ComputeLocation(MessageLocation* target) {
}
bool Top::ShouldReturnException(bool* is_caught_externally,
bool Top::ShouldReportException(bool* is_caught_externally,
bool catchable_by_javascript) {
// Find the top-most try-catch handler.
StackHandler* handler =
@ -847,15 +847,15 @@ void Top::DoThrow(MaybeObject* exception,
Handle<Object> exception_handle(exception_object);
// Determine reporting and whether the exception is caught externally.
bool is_caught_externally = false;
bool is_out_of_memory = exception == Failure::OutOfMemoryException();
bool is_termination_exception = exception == Heap::termination_exception();
bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
// Only real objects can be caught by JS.
ASSERT(!catchable_by_javascript || is_object);
bool should_return_exception =
ShouldReturnException(&is_caught_externally, catchable_by_javascript);
bool report_exception = catchable_by_javascript && should_return_exception;
bool is_caught_externally = false;
bool should_report_exception =
ShouldReportException(&is_caught_externally, catchable_by_javascript);
bool report_exception = catchable_by_javascript && should_report_exception;
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger of exception.
@ -1095,7 +1095,7 @@ char* Top::RestoreThread(char* from) {
// thread_local_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
thread_local_.simulator_ = assembler::arm::Simulator::current();
thread_local_.simulator_ = Simulator::current();
#elif V8_TARGET_ARCH_MIPS
thread_local_.simulator_ = assembler::mips::Simulator::current();
#endif

6
deps/v8/src/top.h

@ -109,7 +109,7 @@ class ThreadLocalTop BASE_EMBEDDED {
#ifdef USE_SIMULATOR
#ifdef V8_TARGET_ARCH_ARM
assembler::arm::Simulator* simulator_;
Simulator* simulator_;
#elif V8_TARGET_ARCH_MIPS
assembler::mips::Simulator* simulator_;
#endif
@ -386,7 +386,9 @@ class Top {
static void DoThrow(MaybeObject* exception,
MessageLocation* location,
const char* message);
static bool ShouldReturnException(bool* is_caught_externally,
// Checks whether the exception should be reported and determines
// whether it is caught externally.
static bool ShouldReportException(bool* is_caught_externally,
bool catchable_by_javascript);
// Attempts to compute the current source location, storing the

4
deps/v8/src/type-info.cc

@ -171,7 +171,7 @@ bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
}
TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
Handle<Object> object = GetElement(map_, expr->position());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;
@ -198,7 +198,7 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
}
TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
Handle<Object> object = GetElement(map_, expr->position());
TypeInfo unknown = TypeInfo::Unknown();
if (!object->IsCode()) return unknown;

10
deps/v8/src/type-info.h

@ -236,12 +236,6 @@ class CaseClause;
class TypeFeedbackOracle BASE_EMBEDDED {
public:
enum Side {
LEFT,
RIGHT,
RESULT
};
TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
bool LoadIsMonomorphic(Property* expr);
@ -261,8 +255,8 @@ class TypeFeedbackOracle BASE_EMBEDDED {
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
// Get type information for arithmetic operations and compares.
TypeInfo BinaryType(BinaryOperation* expr, Side side);
TypeInfo CompareType(CompareOperation* expr, Side side);
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
private:

2
deps/v8/src/v8.cc

@ -79,7 +79,7 @@ bool V8::Initialize(Deserializer* des) {
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM)
::assembler::arm::Simulator::Initialize();
Simulator::Initialize();
#elif defined(V8_TARGET_ARCH_MIPS)
::assembler::mips::Simulator::Initialize();
#endif

6
deps/v8/src/variables.cc

@ -112,12 +112,12 @@ Variable::Variable(Scope* scope,
: scope_(scope),
name_(name),
mode_(mode),
is_valid_LHS_(is_valid_LHS),
kind_(kind),
local_if_not_shadowed_(NULL),
rewrite_(NULL),
is_valid_LHS_(is_valid_LHS),
is_accessed_from_inner_scope_(false),
is_used_(false),
rewrite_(NULL) {
is_used_(false) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}

12
deps/v8/src/variables.h

@ -187,21 +187,23 @@ class Variable: public ZoneObject {
Scope* scope_;
Handle<String> name_;
Mode mode_;
bool is_valid_LHS_;
Kind kind_;
Variable* local_if_not_shadowed_;
// Usage info.
bool is_accessed_from_inner_scope_; // set by variable resolver
bool is_used_;
// Static type information
StaticType type_;
// Code generation.
// rewrite_ is usually a Slot or a Property, but may be any expression.
Expression* rewrite_;
// Valid as a LHS? (const and this are not valid LHS, for example)
bool is_valid_LHS_;
// Usage info.
bool is_accessed_from_inner_scope_; // set by variable resolver
bool is_used_;
};
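
The constructor change in variables.cc and the field reshuffle here go together: C++ initializes members in declaration order, not in the order the initializer list is written, so once rewrite_ and is_valid_LHS_ move within the class, the initializer list must move with them to keep compilers (-Wreorder) quiet and the code honest. A tiny illustration of the rule, with hypothetical members:

#include <cassert>

struct Example {
  int first_;   // declared first: initialized first,
  int second_;  // regardless of initializer-list order.

  // Listing second_ before first_ here would still initialize first_
  // first, and reading second_ to initialize first_ would be undefined;
  // matching the declaration order avoids both traps.
  Example() : first_(1), second_(first_ + 1) {}
};

int main() {
  Example e;
  assert(e.first_ == 1 && e.second_ == 2);
}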

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
#define BUILD_NUMBER 10
#define BUILD_NUMBER 12
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

2
deps/v8/src/x64/assembler-x64-inl.h

@ -425,7 +425,7 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
}

60
deps/v8/src/x64/assembler-x64.cc

@ -300,6 +300,34 @@ Operand::Operand(const Operand& operand, int32_t offset) {
}
}
bool Operand::AddressUsesRegister(Register reg) const {
int code = reg.code();
ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
// Start with only low three bits of base register. Initial decoding doesn't
// distinguish on the REX.B bit.
int base_code = buf_[0] & 0x07;
if (base_code == rsp.code()) {
// SIB byte present in buf_[1].
// Check the index register from the SIB byte + REX.X prefix.
int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
// Index code (including REX.X) of 0x04 (rsp) means no index register.
if (index_code != rsp.code() && index_code == code) return true;
// Add REX.B to get the full base register code.
base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
// A base register of 0x05 (rbp) with mod = 0 means no base register.
if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
return code == base_code;
} else {
// A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
// no base register.
if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
base_code |= ((rex_ & 0x01) << 3);
return code == base_code;
}
}
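
The decoding above relies on the x64 addressing layout: ModR/M packs mod(7:6), reg(5:3), rm(2:0); an rm field of 0b100 (rsp's low bits) signals a SIB byte with scale(7:6), index(5:3), base(2:0); REX.B and REX.X supply the fourth bit of base and index. A sketch mirroring that logic on raw bytes, with a few spot checks:

#include <cassert>

// Register codes as x64 encodes them (low 3 bits in ModRM/SIB, 4th in REX).
enum { RSP = 4, RBP = 5, R13 = 13 };

// Mirrors Operand::AddressUsesRegister above on a (modrm, sib, rex) triple.
bool AddressUsesRegister(unsigned char modrm, unsigned char sib,
                         unsigned char rex, int code) {
  assert((modrm & 0xC0) != 0xC0);      // memory operand only
  int base_code = modrm & 0x07;
  if (base_code == RSP) {              // rm == 0b100 means a SIB byte follows
    int index_code = ((sib >> 3) & 0x07) | ((rex & 0x02) << 2);  // REX.X
    if (index_code != RSP && index_code == code) return true;
    base_code = (sib & 0x07) | ((rex & 0x01) << 3);              // REX.B
    if (base_code == RBP && (modrm & 0xC0) == 0) return false;   // disp32 only
    return code == base_code;
  }
  if (base_code == RBP && (modrm & 0xC0) == 0) return false;     // no base
  return code == (base_code | ((rex & 0x01) << 3));
}

int main() {
  // [rax]: mod 00, rm 000; no SIB, no REX.
  assert(AddressUsesRegister(0x00, 0, 0, 0 /* rax */));
  // [r13 + disp8]: low bits 101 with REX.B; mod 01, so a base IS present.
  assert(AddressUsesRegister(0x45, 0, 0x01, R13));
  // [rsp]: SIB with base rsp; index field 100 means "no index", so rsp is
  // found as the base, not the index.
  assert(AddressUsesRegister(0x04, 0x24, 0, RSP));
}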
// -----------------------------------------------------------------------------
// Implementation of Assembler.
@ -1949,6 +1977,14 @@ void Assembler::push(Immediate value) {
}
void Assembler::push_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x68);
emitl(imm32);
}
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -2641,6 +2677,30 @@ void Assembler::movq(Register dst, XMMRegister src) {
}
void Assembler::movdqa(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_rex_64(src, dst);
emit(0x0F);
emit(0x7F);
emit_sse_operand(src, dst);
}
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_rex_64(dst, src);
emit(0x0F);
emit(0x6F);
emit_sse_operand(dst, src);
}
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
EnsureSpace ensure_space(this);

23
deps/v8/src/x64/assembler-x64.h

@ -153,6 +153,7 @@ struct Register {
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
int code_;
private:
static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
static const int allocationIndexByRegisterCode[kNumRegisters];
@ -390,11 +391,15 @@ class Operand BASE_EMBEDDED {
// this must not overflow.
Operand(const Operand& base, int32_t offset);
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
private:
byte rex_;
byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
// The number of bytes of buf_ in use.
byte len_;
// Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@ -590,6 +595,9 @@ class Assembler : public Malloced {
void popfq();
void push(Immediate value);
// Push a 32 bit integer and guarantee that it is actually pushed as a
// 32 bit value; the normal push would optimize the 8 bit case.
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
@ -821,6 +829,10 @@ class Assembler : public Malloced {
arithmetic_op_32(0x23, dst, src);
}
void andl(Register dst, const Operand& src) {
arithmetic_op_32(0x23, dst, src);
}
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
@ -1205,6 +1217,9 @@ class Assembler : public Malloced {
void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@ -1245,10 +1260,6 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
// Use either movsd or movlpd.
// void movdbl(XMMRegister dst, const Operand& src);
// void movdbl(const Operand& dst, XMMRegister src);
// Debugging
void Print();

28
deps/v8/src/x64/builtins-x64.cc

@ -561,7 +561,33 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
__ int3();
// Enter an internal frame.
__ EnterInternalFrame();
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
// Tear down temporary frame.
__ LeaveInternalFrame();
// Get the full codegen state from the stack and untag it.
__ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
NearLabel not_no_registers, not_tos_rax;
__ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
__ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax);
__ ret(2 * kPointerSize); // Remove state, rax.
__ bind(&not_tos_rax);
__ Abort("no cases left");
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {

16
deps/v8/src/x64/code-stubs-x64.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -91,7 +91,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
__ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
@ -100,7 +101,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(slots_));
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL.
@ -115,7 +116,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ movq(Operand(rax, Context::SlotOffset(i)), rbx);
}
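
The stub's slots_ now counts only the slots beyond the fixed Context::MIN_CONTEXT_SLOTS header (note the matching num_heap_slots() - Context::MIN_CONTEXT_SLOTS change in codegen-x64.cc below), so the stub re-adds the minimum before sizing the allocation. A small arithmetic sketch, with assumed stand-in constants:

#include <cassert>

static const int kMinContextSlots = 5;        // assumed stand-in for
                                              // Context::MIN_CONTEXT_SLOTS
static const int kPointerSize = 8;
static const int kFixedArrayHeaderSize = 16;  // assumed header size

// Mirrors FastNewContextStub: callers pass only the extra slots; the
// stub adds the fixed header slots back before computing the byte size.
int ContextAllocationSize(int extra_slots) {
  int length = extra_slots + kMinContextSlots;
  return length * kPointerSize + kFixedArrayHeaderSize;
}

int main() {
  // A function with 2 context-allocated locals: 7 slots plus header bytes.
  assert(ContextAllocationSize(2) == 7 * 8 + 16);
}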
@ -2773,8 +2774,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope,
int /* alignment_skew */) {
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
@ -2867,7 +2867,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame();
__ LeaveExitFrame(save_doubles_);
__ ret(0);
// Handling of failure.
@ -2976,7 +2976,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
#else
int arg_stack_space = 0;
#endif
__ EnterExitFrame(arg_stack_space);
__ EnterExitFrame(arg_stack_space, save_doubles_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold

12
deps/v8/src/x64/codegen-x64.cc

@ -206,7 +206,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots();
// Allocate the local context if needed.
int heap_slots = scope()->num_heap_slots();
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@ -7235,19 +7235,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to look up the context holding the named
// Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(rsi);
frame_->EmitPush(variable->name());
Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
ASSERT(context.is_register());
frame_->EmitPush(context.reg());
context.Unuse();
frame_->EmitPush(variable->name());
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 2);
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}

442
deps/v8/src/x64/deoptimizer-x64.cc

@ -41,18 +41,82 @@ namespace internal {
int Deoptimizer::table_entry_size_ = 10;
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// UNIMPLEMENTED, for now just return.
return;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
// Get the optimized code.
Code* code = function->code();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each return after a safepoint insert a absolute call to the
// corresponding deoptimization entry.
unsigned last_pc_offset = 0;
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
unsigned instructions = pc_offset - last_pc_offset;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (unsigned i = 0; i < instructions; i++) {
destroyer.masm()->int3();
}
#endif
last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
CodePatcher patcher(
code->instruction_start() + pc_offset + gap_code_size,
Assembler::kCallInstructionLength);
patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
RelocInfo::NONE);
last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
}
}
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
CHECK(code->safepoint_table_start() >= last_pc_offset);
unsigned instructions = code->safepoint_table_start() - last_pc_offset;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (unsigned i = 0; i < instructions; i++) {
destroyer.masm()->int3();
}
#endif
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
node->set_next(deoptimizing_code_list_);
deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
}
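
DeoptimizeFunction walks the safepoint table and, after each safepoint's gap code, overwrites the following bytes with an absolute call into the matching lazy-deoptimization entry; in debug builds everything between patch sites is int3-filled to catch stray execution. A sketch of just the offset bookkeeping, assuming a fixed call-instruction length:

#include <cassert>

static const int kCallInstructionLength = 13;  // assumed, as on x64 above

struct SafepointInfo {
  unsigned pc_offset;    // return-address offset recorded in the table
  int gap_code_size;     // gap-move code emitted after the safepoint
  bool has_deopt_index;  // only these sites get a lazy-deopt call
};

// Returns the offset just past the last byte this safepoint patches,
// mirroring the last_pc_offset bookkeeping in DeoptimizeFunction.
unsigned PatchEnd(const SafepointInfo& sp) {
  if (!sp.has_deopt_index) return sp.pc_offset;  // nothing patched here
  return sp.pc_offset + sp.gap_code_size + kCallInstructionLength;
}

int main() {
  SafepointInfo sp = { 100, 7, true };
  // The call is written at pc_offset + gap_code_size and spans 13 bytes.
  assert(PatchEnd(sp) == 100 + 7 + 13);
  sp.has_deopt_index = false;
  assert(PatchEnd(sp) == 100);
}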
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
@ -64,20 +128,382 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
UNIMPLEMENTED();
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
ASSERT(frame_index >= 0 && frame_index < output_count_);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
intptr_t top_address;
if (is_bottommost) {
// 2 = context and function in the frame.
top_address =
input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = function->shared()->formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Synthesize their values and set them up
// explicitly.
//
// The caller's pc for the bottommost output frame is the same as in the
// input frame. For all subsequent output frames, it can be read from the
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
// The caller's frame pointer for the bottommost output frame is the same
// as in the input frame. For all subsequent output frames, it can be
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
// The context can be gotten from the function so long as we don't
// optimize functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function->context());
// The context for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) output_frame->SetRegister(rsi.code(), value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; context\n",
top_address + output_offset, output_offset, value);
}
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
// The function for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; function\n",
top_address + output_offset, output_offset, value);
}
// Translate the rest of the frame.
for (unsigned i = 0; i < height; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
ASSERT(0 == output_offset);
// Compute this frame's PC, state, and continuation.
Code* non_optimized_code = function->shared()->code();
FixedArray* raw_data = non_optimized_code->deoptimization_data();
DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
Address start = non_optimized_code->instruction_start();
unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = (bailout_type_ == EAGER)
? Builtins::builtin(Builtins::NotifyDeoptimized)
: Builtins::builtin(Builtins::NotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
if (output_count_ - 1 == frame_index) iterator->Done();
}
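// Illustrative recap (not a comment from the source): the frame laid out
// above, from low to high addresses, is
//   [expression stack: height slots]   <- top_address
//   [function]
//   [context]
//   [caller's fp]                      <- fp_value
//   [caller's pc]
//   [receiver and parameters]
// which mirrors the fixed layout described by JavaScriptFrameConstants.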
#define __ masm()->
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
CpuFeatures::Scope scope(SSE2);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ subq(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(rsp, offset), xmm_reg);
}
// We push all registers onto the stack, even though we do not need
// to restore all of them later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::toRegister(i);
__ push(r);
}
const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
kDoubleRegsSize;
// When calling new_deoptimizer_function we need to pass the last argument
// on the stack on Windows and in r8 on Linux. The remaining arguments are
// all passed in registers (different ones on Linux and Windows, though).
#ifdef _WIN64
Register arg4 = r9;
Register arg3 = r8;
Register arg2 = rdx;
Register arg1 = rcx;
#else
Register arg4 = rcx;
Register arg3 = rdx;
Register arg2 = rsi;
Register arg1 = rdi;
#endif
// We use r11 to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing this
// argument on Linux), since r8 is another parameter-passing register on
// Windows.
Register arg5 = r11;
// Get the bailout id from the stack.
__ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
// Get the address of the location in the code object if possible
// and compute the fp-to-sp delta in register arg5.
if (type() == EAGER) {
__ Set(arg4, 0);
__ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
} else {
__ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
__ subq(arg5, rbp);
__ neg(arg5);
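// Illustrative note: arg5 now holds fp minus the sp at the deoptimization
// point, a positive byte count (the fp-to-sp delta) that the runtime uses
// to recover the size of the optimized frame being torn down.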
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(5);
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movq(arg1, rax);
__ movq(arg2, Immediate(type()));
// Args 3 and 4 are already in the right registers.
// On Windows, put the argument on the stack (PrepareCallCFunction has
// created space for this). On Linux, pass the argument in r8.
#ifdef _WIN64
__ movq(Operand(rsp, 0 * kPointerSize), arg5);
#else
__ movq(r8, arg5);
#endif
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
// Preserve deoptimizer object in register rax and get the input
// frame descriptor pointer.
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ pop(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ pop(Operand(rbx, dst_offset));
}
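// Illustrative note: a plain pop suffices for the double values because
// kDoubleSize == kPointerSize on x64; each pop moves one saved XMM value
// into the FrameDescription's double register area.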
// Remove the bailout id from the stack.
if (type() == EAGER) {
__ addq(rsp, Immediate(kPointerSize));
} else {
__ addq(rsp, Immediate(2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ addq(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
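// Worked example (illustrative): if the input frame occupies 24 bytes,
// three iterations copy [rsp], [rsp + 8] and [rsp + 16] into the
// FrameDescription's frame contents and stop once rsp has been unwound
// up to the limit in rcx.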
// Compute the output frame in the deoptimizer.
__ push(rax);
__ PrepareCallCFunction(1);
__ movq(arg1, rax);
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
__ pop(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_8, 0));
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kPointerSize));
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
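// Illustrative note: the inner loop pushes each frame's contents from the
// highest offset down to offset 0, so slot 0 -- the top of the
// materialized frame -- lands at the lowest address; the outer loop
// repeats this for every output frame, bottommost first.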
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(rbx, src_offset));
}
}
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ push(Operand(rbx, FrameDescription::state_offset()));
}
__ push(Operand(rbx, FrameDescription::pc_offset()));
__ push(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ push(Operand(rbx, offset));
}
// Restore the registers from the stack.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
Register r = Register::toRegister(i);
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
if (r.is(rsp)) {
ASSERT(i > 0);
r = Register::toRegister(i - 1);
}
__ pop(r);
}
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
__ movq(r13, roots_address);
__ movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
RelocInfo::NONE);
// Return to the continuation point.
__ ret(0);
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Create a sequence of deoptimization entries.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ push_imm32(i);
__ jmp(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&done);
}
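// Illustrative note: every entry has the same fixed size (a push of the
// entry id followed by a jump to the shared done label), so the entry for
// bailout id n can be located as table start + n * table_entry_size_
// without any lookup.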
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64

8
deps/v8/src/x64/disasm-x64.cc

@@ -1025,11 +1025,19 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x6F) {
AppendToBuffer("movdqa %s,",
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
AppendToBuffer("movdqa ");
current += PrintRightOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
if (opcode == 0x57) {

15
deps/v8/src/x64/full-codegen-x64.cc

@@ -88,7 +88,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
bool function_in_register = true;
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@@ -710,6 +710,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
clause->body_target()->entry_label()->Unuse();
// The default is not a test, but remember it as final fall through.
if (clause->is_default()) {
default_clause = clause;
@@ -3006,19 +3008,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else if (var->is_global()) {
__ push(GlobalObjectOperand());
__ Push(var->name());
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else {
// Non-global variable. Call the runtime to delete from the
// context where the variable was introduced.
__ push(context_register());
__ Push(var->name());
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
context()->Plug(rax);
}
break;

5
deps/v8/src/x64/ic-x64.cc

@@ -397,7 +397,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -405,7 +405,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}

660
deps/v8/src/x64/lithium-codegen-x64.cc

@@ -37,157 +37,6 @@ namespace v8 {
namespace internal {
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -339,6 +188,10 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
// Ensure that patching a deoptimization point won't overwrite the table.
for (int i = 0; i < Assembler::kCallInstructionLength; i++) {
masm()->int3();
}
safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
}
@@ -567,7 +420,24 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
if (cc == no_condition) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
NearLabel done;
__ j(NegateCondition(cc), &done);
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&done);
}
}
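// Illustrative usage note: optimized code calls DeoptimizeIf(cc, env) at
// each side exit; when cc holds, control jumps to the per-index entry
// built by TableEntryGenerator::GeneratePrologue() in deoptimizer-x64.cc.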
@@ -629,37 +499,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
if (kind & Safepoint::kWithRegisters) {
// Register rsi always contains a pointer to the context.
safepoint.DefinePointerRegister(rsi);
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
int deoptimization_index) {
RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
deoptimization_index);
}
@@ -682,86 +555,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move);
}
@@ -820,7 +614,22 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
__ subl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
__ subl(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
}
@@ -1146,7 +955,18 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
NearLabel different, done;
__ cmpq(left, right);
__ j(not_equal, &different);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&different);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1162,7 +982,45 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
// If the expression is known to be a smi, then it's
// definitely not null. Materialize false.
// Consider adding other type and representation tests too.
if (instr->hydrogen()->value()->type().IsSmi()) {
__ LoadRoot(result, Heap::kFalseValueRootIndex);
return;
}
__ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
__ movl(result, Immediate(Heap::kTrueValueRootIndex));
NearLabel load;
__ j(equal, &load);
__ movl(result, Immediate(Heap::kFalseValueRootIndex));
__ bind(&load);
__ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
} else {
NearLabel true_value, false_value, done;
__ j(equal, &true_value);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, &true_value);
__ JumpIfSmi(reg, &false_value);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value);
__ bind(&false_value);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&true_value);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
}
@@ -1204,56 +1062,77 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
ASSERT(!input.is(kScratchRegister));
__ JumpIfSmi(input, is_not_object);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
__ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, is_not_object);
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, is_not_object);
__ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
return below_equal;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Abort("Unimplemented: %s", "DoIsObject");
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
__ j(true_cond, &is_true);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond = EmitIsObject(reg, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
Abort("Unimplemented: %s", "DoIsSmi");
LOperand* input_operand = instr->InputAt(0);
Register result = ToRegister(instr->result());
if (input_operand->IsRegister()) {
Register input = ToRegister(input_operand);
__ CheckSmiToIndicator(result, input);
} else {
Operand input = ToOperand(instr->InputAt(0));
__ CheckSmiToIndicator(result, input);
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
__ movq(result, Operand(kRootRegister, result, times_pointer_size,
Heap::kTrueValueRootIndex * kPointerSize));
}
@@ -1386,7 +1265,25 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
NearLabel done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
__ j(not_equal, &is_false);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1408,7 +1305,12 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
EmitBranch(true_block, false_block, equal);
}
@@ -1493,12 +1395,32 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
Register result = ToRegister(instr->result());
if (result.is(rax)) {
__ load_rax(instr->hydrogen()->cell().location(),
RelocInfo::GLOBAL_PROPERTY_CELL);
} else {
__ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
}
if (instr->hydrogen()->check_hole_value()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
}
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Abort("Unimplemented: %s", "DoStoreGlobal");
Register value = ToRegister(instr->InputAt(0));
if (value.is(rax)) {
__ store_rax(instr->hydrogen()->cell().location(),
RelocInfo::GLOBAL_PROPERTY_CELL);
} else {
__ movq(kScratchRegister,
Handle<Object>::cast(instr->hydrogen()->cell()),
RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(Operand(kScratchRegister, 0), value);
}
}
@@ -1508,7 +1430,14 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
} else {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
}
}
@@ -1558,17 +1487,39 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(argument);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ push(Immediate(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
Abort("unsupported double immediate");
} else {
ASSERT(r.IsTagged());
__ Push(literal);
}
} else if (argument->IsRegister()) {
__ push(ToRegister(argument));
} else {
ASSERT(!argument->IsDoubleRegister());
__ push(ToOperand(argument));
}
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Abort("Unimplemented: %s", "DoGlobalObject");
Register result = ToRegister(instr->result());
__ movq(result, GlobalObjectOperand());
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
Abort("Unimplemented: %s", "DoGlobalReceiver");
Register result = ToRegister(instr->result());
__ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
}
@@ -1665,7 +1616,12 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Set(rax, instr->arity());
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -1675,7 +1631,32 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
int offset = instr->offset();
if (!instr->transition().is_null()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
}
// Do the store.
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWrite(object, offset, value, temp);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWrite(temp, offset, value, object);
}
}
}
@@ -1700,27 +1681,63 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
__ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
__ Integer32ToSmi(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Abort("Unimplemented: %s", "DoNumberTagD");
class DeferredNumberTagD: public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
private:
LNumberTagD* instr_;
};
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->TempAt(0));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
Abort("Unimplemented: %s", "DoDeferredNumberTagD");
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
__ Move(reg, Smi::FromInt(0));
__ PushSafepointRegisters();
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Ensure that value in rax survives popping registers.
__ movq(kScratchRegister, rax);
__ PopSafepointRegisters();
__ movq(reg, kScratchRegister);
}
@@ -1737,7 +1754,34 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
LEnvironment* env) {
NearLabel load_smi, heap_number, done;
// Smi check.
__ JumpIfSmi(input_reg, &load_smi);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(equal, &heap_number);
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN. Compute NaN as 0/0.
__ xorpd(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ jmp(&done);
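// Illustrative note: xorpd zeroes result_reg and divsd then divides it by
// itself, producing the quiet NaN of 0.0 / 0.0 without loading a NaN
// constant from memory.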
// Heap number to XMM conversion.
__ bind(&heap_number);
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ jmp(&done);
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
__ cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
@@ -1762,7 +1806,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Condition cc = masm()->CheckSmi(ToRegister(input));
if (instr->condition() != equal) {
cc = NegateCondition(cc);
}
DeoptimizeIf(cc, instr->environment());
}
@@ -1772,12 +1822,20 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->InputAt(0)->IsRegister());
Register reg = ToRegister(instr->InputAt(0));
__ Cmp(reg, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
Abort("Unimplemented: %s", "DoCheckMap");
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
instr->hydrogen()->map());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -1787,7 +1845,29 @@ void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
Handle<Map>(current_prototype->map()));
DeoptimizeIf(not_equal, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
Handle<Map>(current_prototype->map()));
DeoptimizeIf(not_equal, instr->environment());
}

55
deps/v8/src/x64/lithium-codegen-x64.h

@@ -34,37 +34,15 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,10 +58,24 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -95,7 +87,6 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
@@ -129,7 +120,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -190,13 +180,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
@@ -209,6 +192,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
@@ -231,8 +218,6 @@ class LCodeGen BASE_EMBEDDED {
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Label* is_not_object,
Label* is_object);

320
deps/v8/src/x64/lithium-gap-resolver-x64.cc

@@ -0,0 +1,320 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_X64)
#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
PerformMove(i);
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
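// Worked example (illustrative, not from the source): for the parallel
// move {rax -> rbx, rbx -> rax, constant -> rcx}, the first pass defers
// the constant, PerformMove detects the rax/rbx cycle and EmitSwap
// resolves it with a single xchg, and the second pass then materializes
// the constant into rcx -- so it never ties up a scratch register.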
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move);
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph. We use operand swaps to resolve cycles,
// which means that a call to PerformMove could change any source operand
// in the move graph.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack-allocated local. Recursion may allow
// multiple moves to be pending.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does
// not miss any). Assume there is a non-blocking move with source A
// and this move is blocked on source B and there is a swap of A and
// B. Then A and B must be involved in the same cycle (or they would
// not be swapped). Since this move's destination is B and there is
// only a single incoming edge to an operand, this move must also be
// involved in the same cycle. In that case, the blocking move will
// be created but will be "pending" when we return from PerformMove.
PerformMove(i);
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// This move's source may have changed due to swaps to resolve cycles and
// so it may now be the last move in the cycle. If so remove it.
if (moves_[index].source()->Equals(destination)) {
moves_[index].Eliminate();
return;
}
// The move may be blocked on a (at most one) pending move, in which case
// we have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
EmitSwap(index);
return;
}
}
// This move is not blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register src = cgen_->ToRegister(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ movq(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movq(dst, src);
}
} else if (source->IsStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ movq(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movq(kScratchRegister, src);
__ movq(dst, kScratchRegister);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
if (cgen_->IsInteger32Constant(constant_source)) {
// Allow top 32 bits of an untagged Integer32 to be arbitrary.
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
}
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ movsd(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ movsd(cgen_->ToOperand(destination), src);
}
} else if (source->IsDoubleStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
__ movsd(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ movsd(xmm0, src);
__ movsd(cgen_->ToOperand(destination), xmm0);
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
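// Illustrative note: the dispatch above exists because x64 has no
// memory-to-memory moves; a stack-slot-to-stack-slot move bounces through
// kScratchRegister and a double-stack-slot move through xmm0, which is
// why both are reserved as scratch throughout this file.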
void LGapResolver::EmitSwap(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
__ xchg(dst, src);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
// Swap a general-purpose register and a stack slot.
Register reg =
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
__ movq(kScratchRegister, mem);
__ movq(mem, reg);
__ movq(reg, kScratchRegister);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
// Swap two stack slots or two double stack slots.
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
__ movq(kScratchRegister, dst);
__ movsd(dst, xmm0);
__ movq(src, kScratchRegister);
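// Illustrative note: this swaps two memory slots using both scratch
// registers at once -- xmm0 holds the eight bytes of src while
// kScratchRegister holds dst -- so it works for tagged and double slots
// alike.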
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
__ movsd(xmm0, source_reg);
__ movsd(source_reg, destination_reg);
__ movsd(destination_reg, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
? source
: destination);
LOperand* other = source->IsDoubleRegister() ? destination : source;
ASSERT(other->IsDoubleStackSlot());
Operand other_operand = cgen_->ToOperand(other);
__ movsd(xmm0, other_operand);
__ movsd(other_operand, reg);
__ movsd(reg, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
}
// The swap of source and destination has executed a move from source to
// destination.
moves_[index].Eliminate();
// Any unperformed (including pending) move with a source of either
// this move's source or destination needs to have their source
// changed to reflect the state of affairs after the swap.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(source)) {
moves_[i].set_source(destination);
} else if (other_move.Blocks(destination)) {
moves_[i].set_source(source);
}
}
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64

74
deps/v8/src/x64/lithium-gap-resolver-x64.h

@@ -0,0 +1,74 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Execute a move by emitting a swap of two operands. The move from
// source to destination is removed from the move graph.
void EmitSwap(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
};
} } // namespace v8::internal
#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
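
The comments above name the phases but not the ordering rule connecting them: PerformMove first performs, recursively, every move that reads its destination, so the destination becomes safe to clobber; reaching a move that is already pending means the dependency chain closed into a cycle, which is EmitSwap's job. A compact model under hypothetical names (the real code marks a move pending by clearing its destination):

#include <cstdio>
#include <string>
#include <vector>

struct Move {
  std::string src, dst;
  bool done = false, pending = false;
};

void PerformMove(std::vector<Move>& moves, size_t index) {
  Move& m = moves[index];
  m.pending = true;
  // Clear out anything that still needs to read our destination.
  for (size_t i = 0; i < moves.size(); ++i) {
    Move& other = moves[i];
    if (i == index || other.done || other.src != m.dst) continue;
    if (other.pending) {
      // The recursion reached itself: a cycle. Simplification here;
      // the real resolver calls EmitSwap(index) and redirects sources.
      std::printf("cycle at %s -> %s: resolve with a swap\n",
                  m.src.c_str(), m.dst.c_str());
      m.pending = false;
      m.done = true;
      return;
    }
    PerformMove(moves, i);
  }
  m.pending = false;
  m.done = true;
  std::printf("move %s -> %s\n", m.src.c_str(), m.dst.c_str());
}

int main() {
  // Acyclic chain: r2 -> r3 must be emitted before r1 -> r2.
  std::vector<Move> moves = {{"r1", "r2"}, {"r2", "r3"}};
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) PerformMove(moves, i);
  }
  return 0;
}

Run on the chain above this prints the moves in dependency order (r2 -> r3 first); on a cycle it falls into the swap branch, where the previous sketch's redirect loop takes over.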

188
deps/v8/src/x64/lithium-x64.cc

@@ -974,12 +974,7 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2);
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1048,20 +1043,19 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
Abort("Unimplemented: %s", "DoPushArgument");
return NULL;
++argument_count_;
LOperand* argument = UseOrConstant(instr->argument());
return new LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
Abort("Unimplemented: %s", "DoGlobalObject");
return NULL;
return DefineAsRegister(new LGlobalObject);
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
Abort("Unimplemented: %s", "DoGlobalReceiver");
return NULL;
return DefineAsRegister(new LGlobalReceiver);
}
@@ -1103,8 +1097,10 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
Abort("Unimplemented: %s", "DoCallNew");
return NULL;
LOperand* constructor = UseFixed(instr->constructor(), rdi);
argument_count_ -= instr->argument_count();
LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1181,8 +1177,23 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
Abort("Unimplemented: %s", "DoSub");
return NULL;
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1243,26 +1254,34 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
Abort("Unimplemented: %s", "DoCompareJSObjectEq");
return NULL;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
Abort("Unimplemented: %s", "DoIsNull");
return NULL;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsNull(value));
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
Abort("Unimplemented: %s", "DoIsObject");
return NULL;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LIsObject(value));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
Abort("Unimplemented: %s", "DoIsSmi");
return NULL;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
return DefineAsRegister(new LIsSmi(value));
}
@@ -1316,14 +1335,69 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Abort("Unimplemented: %s", "DoChange");
Representation from = instr->from();
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
LOperand* xmm_temp =
(instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
return DefineSameAsFirst(new LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
bool needs_temp = instr->CanTruncateToInt32() &&
!CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
}
}
UNREACHABLE();
return NULL;
}
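
DoChange above dispatches on the (from, to) representation pair, and the cheap paths are the smi ones: LSmiTag and LSmiUntag are pure bit operations. A sketch of the x64 tagging scheme they rely on, where the 32-bit payload sits in the upper word (kSmiShift is 32 on x64 in this V8 version; ia32 shifts by 1) — hypothetical helpers, not V8's actual macros:

#include <cassert>
#include <cstdint>

const int kSmiShift = 32;  // x64; ia32 uses 1 with a 31-bit payload

// Tag: park the 32-bit integer in the upper half; tag bit 0 stays 0.
uint64_t SmiTag(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << kSmiShift;
}

// Untag: shift the payload back down into the low word.
int32_t SmiUntag(uint64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);
}

// Heap pointers have bit 0 set, so a clear low bit identifies a smi.
bool IsSmi(uint64_t word) { return (word & 1) == 0; }

int main() {
  uint64_t s = SmiTag(-7);
  assert(IsSmi(s));
  assert(SmiUntag(s) == -7);
  return 0;
}

This is also why the IsInSmiRange check lets an Integer32-to-Tagged change use LSmiTag directly: a value known to fit in the smi payload never needs the allocating LNumberTagI path.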
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
Abort("Unimplemented: %s", "DoCheckNonSmi");
return NULL;
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value, zero));
}
@@ -1334,26 +1408,28 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
return NULL;
LOperand* temp = TempRegister();
LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
Abort("Unimplemented: %s", "DoCheckSmi");
return NULL;
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value, not_zero));
}
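
DoCheckSmi and DoCheckNonSmi above share LCheckSmi and differ only in the condition operand. The consistent reading (an assumption about the codegen side, but the only one that fits both call sites) is that the operand is the deoptimization condition for the smi-tag test: kSmiTag is 0, so the test yields zero exactly for smis, CheckSmi deopts on not_zero, CheckNonSmi on zero. A small model:

#include <cassert>
#include <cstdint>

enum Condition { zero, not_zero };
const uint64_t kSmiTagMask = 1;

// Would LCheckSmi(value, cond) deoptimize? cond is evaluated against
// the zero flag of `test value, kSmiTagMask`.
bool Deoptimizes(uint64_t value, Condition cond) {
  bool zf = (value & kSmiTagMask) == 0;  // set exactly for smis
  return cond == zero ? zf : !zf;
}

int main() {
  uint64_t smi = 42ull << 32;   // tagged smi: low bit clear
  uint64_t heap_obj = 0x1001;   // tagged pointer: low bit set
  assert(!Deoptimizes(smi, not_zero));      // CheckSmi passes a smi
  assert(Deoptimizes(heap_obj, not_zero));  // CheckSmi rejects a pointer
  assert(Deoptimizes(smi, zero));           // CheckNonSmi rejects a smi
  return 0;
}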
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
Abort("Unimplemented: %s", "DoCheckFunction");
return NULL;
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
Abort("Unimplemented: %s", "DoCheckMap");
return NULL;
LOperand* value = UseRegisterAtStart(instr->value());
LCheckMap* result = new LCheckMap(value);
return AssignEnvironment(result);
}
@@ -1381,15 +1457,15 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
Abort("Unimplemented: %s", "DoLoadGlobal");
return NULL;
LLoadGlobal* result = new LLoadGlobal;
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
Abort("Unimplemented: %s", "DoStoreGlobal");
return NULL;
}
return new LStoreGlobal(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
@@ -1399,8 +1475,9 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
Abort("Unimplemented: %s", "DoLoadNamedField");
return NULL;
ASSERT(instr->representation().IsTagged());
LOperand* obj = UseRegisterAtStart(instr->object());
return DefineAsRegister(new LLoadNamedField(obj));
}
@@ -1450,8 +1527,22 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
Abort("Unimplemented: %s", "DoStoreNamedField");
return NULL;
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
: UseRegisterAtStart(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
? TempRegister() : NULL;
return new LStoreNamedField(obj, val, temp);
}
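
The comment in DoStoreNamedField above compresses two resource decisions: the write barrier clobbers the object and value registers, so under a barrier both become temps owned by the store, and a scratch register is needed either for the barrier or to address the out-of-object properties array. A sketch of that decision table (hypothetical enum; the real policies are LUnallocated annotations):

#include <cstdio>

enum class Use { kRegisterAtStart, kRegister, kTempRegister };

struct StorePolicy {
  Use object, value;
  bool needs_scratch;
};

// Mirrors the branches of DoStoreNamedField above.
StorePolicy ChooseStorePolicy(bool needs_write_barrier, bool is_in_object) {
  StorePolicy p;
  p.object = needs_write_barrier ? Use::kTempRegister : Use::kRegisterAtStart;
  p.value  = needs_write_barrier ? Use::kTempRegister : Use::kRegister;
  p.needs_scratch = !is_in_object || needs_write_barrier;
  return p;
}

int main() {
  StorePolicy p = ChooseStorePolicy(true, true);
  std::printf("in-object store with barrier: scratch %s\n",
              p.needs_scratch ? "needed" : "not needed");
  return 0;
}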
@@ -1588,7 +1679,14 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
Abort("Unimplemented: %s", "DoEnterInlined");
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
false,
undefined);
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
}

Some files were not shown because too many files changed in this diff
