
Upgrade V8 to 3.0.12

v0.7.4-release
Ryan Dahl, 14 years ago
commit 7eaa956bae
  1.  12   deps/v8/0002-Patch-for-oprofile.patch
  2.   1   deps/v8/AUTHORS
  3.  22   deps/v8/ChangeLog
  4.  22   deps/v8/SConstruct
  5.  10   deps/v8/src/SConscript
  6. 247   deps/v8/src/arm/assembler-arm.cc
  7. 168   deps/v8/src/arm/assembler-arm.h
  8.   6   deps/v8/src/arm/builtins-arm.cc
  9. 748   deps/v8/src/arm/code-stubs-arm.cc
 10. 111   deps/v8/src/arm/code-stubs-arm.h
 11.   2   deps/v8/src/arm/codegen-arm-inl.h
 12.  40   deps/v8/src/arm/codegen-arm.cc
 13.  10   deps/v8/src/arm/constants-arm.cc
 14. 588   deps/v8/src/arm/constants-arm.h
 15.   2   deps/v8/src/arm/cpu-arm.cc
 16.  11   deps/v8/src/arm/deoptimizer-arm.cc
 17. 459   deps/v8/src/arm/disasm-arm.cc
 18.  11   deps/v8/src/arm/frames-arm.cc
 19.  18   deps/v8/src/arm/frames-arm.h
 20.  79   deps/v8/src/arm/full-codegen-arm.cc
 21.  29   deps/v8/src/arm/ic-arm.cc
 22.   8   deps/v8/src/arm/jump-target-arm.cc
 23.  29   deps/v8/src/arm/lithium-arm.cc
 24.  11   deps/v8/src/arm/lithium-arm.h
 25. 162   deps/v8/src/arm/lithium-codegen-arm.cc
 26.   4   deps/v8/src/arm/lithium-codegen-arm.h
 27. 144   deps/v8/src/arm/macro-assembler-arm.cc
 28.  42   deps/v8/src/arm/macro-assembler-arm.h
 29. 478   deps/v8/src/arm/simulator-arm.cc
 30.  84   deps/v8/src/arm/simulator-arm.h
 31. 108   deps/v8/src/arm/stub-cache-arm.cc
 32.   3   deps/v8/src/assembler.h
 33.  45   deps/v8/src/ast.cc
 34.  20   deps/v8/src/ast.h
 35.   3   deps/v8/src/bootstrapper.cc
 36.   7   deps/v8/src/builtins.cc
 37.   1   deps/v8/src/builtins.h
 38.  16   deps/v8/src/code-stubs.h
 39.  19   deps/v8/src/deoptimizer.h
 40.   6   deps/v8/src/frames.cc
 41.   6   deps/v8/src/heap.cc
 42.  31   deps/v8/src/hydrogen-instructions.cc
 43. 310   deps/v8/src/hydrogen-instructions.h
 44.  46   deps/v8/src/hydrogen.cc
 45.   2   deps/v8/src/hydrogen.h
 46. 111   deps/v8/src/ia32/code-stubs-ia32.cc
 47.   1   deps/v8/src/ia32/code-stubs-ia32.h
 48.  12   deps/v8/src/ia32/codegen-ia32.cc
 49. 112   deps/v8/src/ia32/deoptimizer-ia32.cc
 50.  15   deps/v8/src/ia32/full-codegen-ia32.cc
 51.   6   deps/v8/src/ia32/ic-ia32.cc
 52.  51   deps/v8/src/ia32/lithium-codegen-ia32.cc
 53.   4   deps/v8/src/ia32/lithium-codegen-ia32.h
 54.  11   deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
 55.   7   deps/v8/src/ia32/lithium-ia32.cc
 56.   8   deps/v8/src/ia32/macro-assembler-ia32.cc
 57. 203   deps/v8/src/ia32/stub-cache-ia32.cc
 58.  59   deps/v8/src/ic.cc
 59.   3   deps/v8/src/ic.h
 60.  10   deps/v8/src/mark-compact.cc
 61.  36   deps/v8/src/messages.js
 62.   3   deps/v8/src/objects-inl.h
 63. 215   deps/v8/src/parser.cc
 64.   8   deps/v8/src/parser.h
 65.   2   deps/v8/src/platform-freebsd.cc
 66.  19   deps/v8/src/runtime-profiler.cc
 67.  49   deps/v8/src/runtime.cc
 68.   2   deps/v8/src/runtime.h
 69.  43   deps/v8/src/safepoint-table.cc
 70.  25   deps/v8/src/safepoint-table.h
 71.  10   deps/v8/src/scanner-base.cc
 72.  18   deps/v8/src/scanner-base.h
 73.   5   deps/v8/src/scopes.cc
 74.  11   deps/v8/src/scopes.h
 75.   4   deps/v8/src/serialize.cc
 76.   3   deps/v8/src/spaces.cc
 77.  24   deps/v8/src/stub-cache.h
 78.  14   deps/v8/src/top.cc
 79.   6   deps/v8/src/top.h
 80.   4   deps/v8/src/type-info.cc
 81.  10   deps/v8/src/type-info.h
 82.   2   deps/v8/src/v8.cc
 83.   6   deps/v8/src/variables.cc
 84.  12   deps/v8/src/variables.h
 85.   2   deps/v8/src/version.cc
 86.   2   deps/v8/src/x64/assembler-x64-inl.h
 87.  60   deps/v8/src/x64/assembler-x64.cc
 88.  23   deps/v8/src/x64/assembler-x64.h
 89.  28   deps/v8/src/x64/builtins-x64.cc
 90.  16   deps/v8/src/x64/code-stubs-x64.cc
 91.  12   deps/v8/src/x64/codegen-x64.cc
 92. 442   deps/v8/src/x64/deoptimizer-x64.cc
 93.   8   deps/v8/src/x64/disasm-x64.cc
 94.  15   deps/v8/src/x64/full-codegen-x64.cc
 95.   5   deps/v8/src/x64/ic-x64.cc
 96. 662   deps/v8/src/x64/lithium-codegen-x64.cc
 97.  55   deps/v8/src/x64/lithium-codegen-x64.h
 98. 320   deps/v8/src/x64/lithium-gap-resolver-x64.cc
 99.  74   deps/v8/src/x64/lithium-gap-resolver-x64.h
100. 188   deps/v8/src/x64/lithium-x64.cc

12
deps/v8/0002-Patch-for-oprofile.patch

@@ -1,12 +0,0 @@
---- SConstruct	2010-12-16 11:49:26.000000000 -0800
-+++ /tmp/SConstruct	2010-12-16 11:48:23.000000000 -0800
-@@ -225,7 +225,8 @@
-       'LINKFLAGS': ['-m64'],
-     },
-     'prof:oprofile': {
--      'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
-+      'CPPDEFINES': ['ENABLE_OPROFILE_AGENT'],
-+      'LIBS': ['opagent', 'bfd']
-     }
-   },
-   'msvc': {

1
deps/v8/AUTHORS

@@ -34,3 +34,4 @@ Rodolph Perfetta <rodolph.perfetta@arm.com>
 Ryan Dahl <coldredlemur@gmail.com>
 Subrato K De <subratokde@codeaurora.org>
 Vlad Burlik <vladbph@gmail.com>
+Mike Gilbert <floppymaster@gmail.com>

22
deps/v8/ChangeLog

@@ -1,8 +1,26 @@
+2011-01-28: Version 3.0.12
+
+        Added support for strict mode parameter and object property
+        validation.
+
+        Fixed a couple of crash bugs.
+
+
+2011-01-25: Version 3.0.11
+
+        Fixed a bug in deletion of lookup slots that could cause global
+        variables to be accidentally deleted (http://crbug.com/70066).
+
+        Added support for strict mode octal literal verification.
+
+        Fixed a couple of crash bugs (issues 1070 and 1071).
+
+
 2011-01-24: Version 3.0.10

         Fixed External::Wrap for 64-bit addresses (issue 1037).

         Fixed incorrect .arguments variable proxy handling in the full
         code generator (issue 1060).

         Introduced partial strict mode support.

@@ -11,7 +29,7 @@
         (issue http://crbug.com/70334).

         Fixed incorrect rounding for float-to-integer conversions for external
         array types, which implement the Typed Array spec
         (issue http://crbug.com/50972).

         Performance improvements on the IA32 platform.

22
deps/v8/SConstruct

@@ -32,7 +32,7 @@ import os
 from os.path import join, dirname, abspath
 from types import DictType, StringTypes
 root_dir = dirname(File('SConstruct').rfile().abspath)
-sys.path.append(join(root_dir, 'tools'))
+sys.path.insert(0, join(root_dir, 'tools'))
 import js2c, utils

 # ANDROID_TOP is the top of the Android checkout, fetched from the environment

@@ -127,12 +127,16 @@ LIBRARY_FLAGS = {
     },
     'inspector:on': {
       'CPPDEFINES': ['INSPECTOR'],
+    },
+    'liveobjectlist:on': {
+      'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
+                     'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
     }
   },
   'gcc': {
     'all': {
       'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-      'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
+      'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
     },
     'visibility:hidden': {
       # Use visibility=default to disable this.

@@ -325,7 +329,7 @@ V8_EXTRA_FLAGS = {
   },
   'msvc': {
     'all': {
-      'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
+      'WARNINGFLAGS': ['/W3', '/WX', '/wd4351', '/wd4355', '/wd4800']
     },
     'library:shared': {
       'CPPDEFINES': ['BUILDING_V8_SHARED'],

@@ -751,6 +755,11 @@ SIMPLE_OPTIONS = {
     'default': 'off',
     'help': 'enable inspector features'
   },
+  'liveobjectlist': {
+    'values': ['on', 'off'],
+    'default': 'off',
+    'help': 'enable live object list features in the debugger'
+  },
   'soname': {
     'values': ['on', 'off'],
     'default': 'off',

@@ -1008,6 +1017,13 @@ def PostprocessOptions(options, os):
     # Print a warning if native regexp is specified for mips
     print "Warning: forcing regexp to interpreted for mips"
     options['regexp'] = 'interpreted'
+  if options['liveobjectlist'] == 'on':
+    if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
+      # Print a warning that liveobjectlist will implicitly enable the debugger
+      print "Warning: forcing debuggersupport on for liveobjectlist"
+    options['debuggersupport'] = 'on'
+    options['inspector'] = 'on'
+    options['objectprint'] = 'on'

 def ParseEnvOverrides(arg, imports):

10
deps/v8/src/SConscript

@@ -95,6 +95,7 @@ SOURCES = {
     mark-compact.cc
     messages.cc
     objects.cc
+    objects-printer.cc
    objects-visiting.cc
     oprofile-agent.cc
     parser.cc

@@ -216,8 +217,9 @@ SOURCES = {
     x64/full-codegen-x64.cc
     x64/ic-x64.cc
     x64/jump-target-x64.cc
-    x64/lithium-x64.cc
     x64/lithium-codegen-x64.cc
+    x64/lithium-gap-resolver-x64.cc
+    x64/lithium-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
     x64/register-allocator-x64.cc

@@ -236,10 +238,8 @@ SOURCES = {
   'os:win32': ['platform-win32.cc'],
   'mode:release': [],
   'mode:debug': [
-    'objects-debug.cc', 'objects-printer.cc', 'prettyprinter.cc',
-    'regexp-macro-assembler-tracer.cc'
-  ],
-  'objectprint:on': ['objects-printer.cc']
+    'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
+  ]
 }

247
deps/v8/src/arm/assembler-arm.cc

@@ -213,74 +213,29 @@ MemOperand::MemOperand(Register rn, Register rm,
 // -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
-  H   = 1 << 5,   // halfword (or byte)
-  S6  = 1 << 6,   // signed (or unsigned)
-  L   = 1 << 20,  // load (or store)
-  S   = 1 << 20,  // set condition code (or leave unchanged)
-  W   = 1 << 21,  // writeback base register (or leave unchanged)
-  A   = 1 << 21,  // accumulate in multiply instruction (or not)
-  B   = 1 << 22,  // unsigned byte (or word)
-  N   = 1 << 22,  // long (or short)
-  U   = 1 << 23,  // positive (or negative) offset/index
-  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
-  I   = 1 << 25,  // immediate shifter operand (or not)
-
-  B4  = 1 << 4,
-  B5  = 1 << 5,
-  B6  = 1 << 6,
-  B7  = 1 << 7,
-  B8  = 1 << 8,
-  B9  = 1 << 9,
-  B12 = 1 << 12,
-  B16 = 1 << 16,
-  B18 = 1 << 18,
-  B19 = 1 << 19,
-  B20 = 1 << 20,
-  B21 = 1 << 21,
-  B22 = 1 << 22,
-  B23 = 1 << 23,
-  B24 = 1 << 24,
-  B25 = 1 << 25,
-  B26 = 1 << 26,
-  B27 = 1 << 27,
-
-  // Instruction bit masks.
-  RdMask          = 15 << 12,  // in str instruction
-  CondMask        = 15 << 28,
-  CoprocessorMask = 15 << 8,
-  OpCodeMask      = 15 << 21,  // in data-processing instructions
-  Imm24Mask       = (1 << 24) - 1,
-  Off12Mask       = (1 << 12) - 1,
-  // Reserved condition.
-  nv = 15 << 28
-};
+// Specific instructions, constants, and masks.

 // add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
-    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+const Instr kPopInstruction =
+    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
 // register r is not encoded.
-static const Instr kPushRegPattern =
+const Instr kPushRegPattern =
     al | B26 | 4 | NegPreIndex | sp.code() * B16;
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
-static const Instr kPopRegPattern =
+const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | sp.code() * B16;
 // mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
 // ldr rd, [pc, #offset]
-const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
 const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
-    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
@@ -292,33 +247,28 @@ const Instr kMovwLeaveCCFlip = 0x5 * B21;
 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
 const Instr kCmpCmnPattern = 0x15 * B20;
 const Instr kCmpCmnFlip = B21;
-const Instr kALUMask = 0x6f * B21;
-const Instr kAddPattern = 0x4 * B21;
-const Instr kSubPattern = 0x2 * B21;
-const Instr kBicPattern = 0xe * B21;
-const Instr kAndPattern = 0x0 * B21;
 const Instr kAddSubFlip = 0x6 * B21;
 const Instr kAndBicFlip = 0xe * B21;

 // A mask for the Rd register for push, pop, ldr, str instructions.
-const Instr kRdMask = 0x0000f000;
-static const int kRdShift = 12;
-static const Instr kLdrRegFpOffsetPattern =
+const Instr kLdrRegFpOffsetPattern =
     al | B26 | L | Offset | fp.code() * B16;
-static const Instr kStrRegFpOffsetPattern =
+const Instr kStrRegFpOffsetPattern =
     al | B26 | Offset | fp.code() * B16;
-static const Instr kLdrRegFpNegOffsetPattern =
+const Instr kLdrRegFpNegOffsetPattern =
     al | B26 | L | NegOffset | fp.code() * B16;
-static const Instr kStrRegFpNegOffsetPattern =
+const Instr kStrRegFpNegOffsetPattern =
     al | B26 | NegOffset | fp.code() * B16;
-static const Instr kLdrStrInstrTypeMask = 0xffff0000;
-static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-static const Instr kLdrStrOffsetMask = 0x00000fff;
+const Instr kLdrStrInstrTypeMask = 0xffff0000;
+const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+const Instr kLdrStrOffsetMask = 0x00000fff;

 // Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
 static byte* spare_buffer_ = NULL;

 Assembler::Assembler(void* buffer, int buffer_size)
     : positions_recorder_(this),
       allow_peephole_optimization_(false) {
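A note on how these Instr patterns compose: each one is a 32-bit word OR-ed together from single-bit constants (B12 = 1 << 12, B16 = 1 << 16, and so on), with register numbers placed by multiplication. Below is a minimal standalone sketch, with the handful of constants mirrored locally (values copied from the enum removed above; PostIndex in the new kPopInstruction carries the same 4 << 21 value as the ADD opcode field), checking that the pattern is exactly the ARM encoding of add sp, sp, #4:

    #include <cstdint>

    // Local mirrors of a few V8 ARM assembler constants; sp is r13 on ARM.
    int main() {
      const uint32_t al  = 14u << 28;  // condition field: always
      const uint32_t I   = 1u << 25;   // immediate shifter operand
      const uint32_t B12 = 1u << 12, B16 = 1u << 16, B21 = 1u << 21;
      const uint32_t LeaveCC = 0;      // S bit clear: don't touch flags
      const uint32_t ADD = 4u * B21;   // data-processing opcode, bits 21..24
      const uint32_t sp  = 13;         // register number of sp
      // al | ADD | LeaveCC | I | Rn=sp | Rd=sp | imm=4  ==  add sp, sp, #4
      const uint32_t kPop = al | ADD | LeaveCC | I | sp * B16 | sp * B12 | 4;
      return kPop == 0xE28DD004u ? 0 : 1;  // 0xE28DD004 is "add sp, sp, #4"
    }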
@@ -411,7 +361,7 @@ int Assembler::GetBranchOffset(Instr instr) {
   ASSERT(IsBranch(instr));
   // Take the jump offset in the lower 24 bits, sign extend it and multiply it
   // with 4 to get the offset in bytes.
-  return ((instr & Imm24Mask) << 8) >> 6;
+  return ((instr & kImm24Mask) << 8) >> 6;
 }
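The masked double shift above is the whole trick in GetBranchOffset: the AND keeps imm24 in the low bits, << 8 moves its sign bit up to bit 31, and the arithmetic >> 6 sign-extends while leaving a net << 2, which is the multiply by 4. A tiny self-contained check (like the V8 code itself, this assumes arithmetic right shift of negative signed values, which is implementation-defined but universal on the targets involved):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kImm24Mask = (1 << 24) - 1;
      // A b/bl-style word whose imm24 field holds -2 (0xfffffe in 24 bits).
      const int32_t instr = static_cast<int32_t>(0xea000000u | 0x00fffffeu);
      const int32_t offset = ((instr & kImm24Mask) << 8) >> 6;  // sign-extend, *4
      assert(offset == -8);  // -2 instructions * 4 bytes
      return 0;
    }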
@@ -423,7 +373,7 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
   ASSERT(IsLdrRegisterImmediate(instr));
   bool positive = (instr & B23) == B23;
-  int offset = instr & Off12Mask;  // Zero extended offset.
+  int offset = instr & kOff12Mask;  // Zero extended offset.
   return positive ? offset : -offset;
 }

@@ -436,7 +386,7 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset.
-  return (instr & ~Off12Mask) | offset;
+  return (instr & ~kOff12Mask) | offset;
 }

@@ -453,7 +403,7 @@ Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset.
-  return (instr & ~Off12Mask) | offset;
+  return (instr & ~kOff12Mask) | offset;
 }

@@ -467,13 +417,13 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
   ASSERT(offset >= 0);
   ASSERT(is_uint12(offset));
   // Set the offset.
-  return (instr & ~Off12Mask) | offset;
+  return (instr & ~kOff12Mask) | offset;
 }

 Register Assembler::GetRd(Instr instr) {
   Register reg;
-  reg.code_ = ((instr & kRdMask) >> kRdShift);
+  reg.code_ = Instruction::RdValue(instr);
   return reg;
 }

@@ -511,7 +461,7 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pc +/- offset_12].
-  return (instr & 0x0f7f0000) == 0x051f0000;
+  return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
 }
@@ -532,13 +482,14 @@ const int kEndOfChain = -4;
 int Assembler::target_at(int pos) {
   Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
+  if ((instr & ~kImm24Mask) == 0) {
     // Emitted label constant, not part of a branch.
     return instr - (Code::kHeaderSize - kHeapObjectTag);
   }
   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
-  if ((instr & CondMask) == nv && (instr & B24) != 0) {
+  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
+  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
+      ((instr & B24) != 0)) {
     // blx uses bit 24 to encode bit 2 of imm26
     imm26 += 2;
   }

@@ -548,7 +499,7 @@ int Assembler::target_at(int pos) {
 void Assembler::target_at_put(int pos, int target_pos) {
   Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
+  if ((instr & ~kImm24Mask) == 0) {
     ASSERT(target_pos == kEndOfChain || target_pos >= 0);
     // Emitted label constant, not part of a branch.
     // Make label relative to Code* of generated Code object.

@@ -557,17 +508,17 @@ void Assembler::target_at_put(int pos, int target_pos) {
   }
   int imm26 = target_pos - (pos + kPcLoadDelta);
   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  if ((instr & CondMask) == nv) {
+  if (Instruction::ConditionField(instr) == kSpecialCondition) {
     // blx uses bit 24 to encode bit 2 of imm26
     ASSERT((imm26 & 1) == 0);
-    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
   } else {
     ASSERT((imm26 & 3) == 0);
-    instr &= ~Imm24Mask;
+    instr &= ~kImm24Mask;
   }
   int imm24 = imm26 >> 2;
   ASSERT(is_int24(imm24));
-  instr_at_put(pos, instr | (imm24 & Imm24Mask));
+  instr_at_put(pos, instr | (imm24 & kImm24Mask));
 }

@@ -582,14 +533,14 @@ void Assembler::print(Label* L) {
   while (l.is_linked()) {
     PrintF("@ %d ", l.pos());
     Instr instr = instr_at(l.pos());
-    if ((instr & ~Imm24Mask) == 0) {
+    if ((instr & ~kImm24Mask) == 0) {
       PrintF("value\n");
     } else {
       ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
-      int cond = instr & CondMask;
+      Condition cond = Instruction::ConditionField(instr);
       const char* b;
       const char* c;
-      if (cond == nv) {
+      if (cond == kSpecialCondition) {
         b = "blx";
         c = "";
       } else {
@@ -731,14 +682,14 @@ static bool fits_shifter(uint32_t imm32,
       }
     } else {
       Instr alu_insn = (*instr & kALUMask);
-      if (alu_insn == kAddPattern ||
-          alu_insn == kSubPattern) {
+      if (alu_insn == ADD ||
+          alu_insn == SUB) {
         if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
           *instr ^= kAddSubFlip;
           return true;
         }
-      } else if (alu_insn == kAndPattern ||
-                 alu_insn == kBicPattern) {
+      } else if (alu_insn == AND ||
+                 alu_insn == BIC) {
         if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
           *instr ^= kAndBicFlip;
           return true;

@@ -782,7 +733,7 @@ void Assembler::addrmod1(Instr instr,
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
-  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
     // Immediate.
     uint32_t rotate_imm;

@@ -794,8 +745,8 @@ void Assembler::addrmod1(Instr instr,
       // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
       CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
-      Condition cond = static_cast<Condition>(instr & CondMask);
-      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
+      Condition cond = Instruction::ConditionField(instr);
+      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
         if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
           RecordRelocInfo(x.rmode_, x.imm32_);
           ldr(rd, MemOperand(pc, 0), cond);
@@ -836,7 +787,7 @@ void Assembler::addrmod1(Instr instr,

 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | B | L)) == B26);
+  ASSERT((instr & ~(kCondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
     // Immediate offset.

@@ -849,8 +800,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
       // Immediate offset cannot be encoded, load it first to register ip
       // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
+      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
       addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
       return;
     }

@@ -869,7 +819,7 @@ void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {

 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
   ASSERT(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {

@@ -883,8 +833,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
       // Immediate offset cannot be encoded, load it first to register ip
       // rn (and rd in a load) should never be ip, or will be trashed.
       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
+      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
       addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
       return;
     }

@@ -895,7 +844,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
     // rn (and rd in a load) should never be ip, or will be trashed.
     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
-        static_cast<Condition>(instr & CondMask));
+        Instruction::ConditionField(instr));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {

@@ -909,7 +858,7 @@ void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {

 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
-  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
   ASSERT(rl != 0);
   ASSERT(!rn.is(pc));
   emit(instr | rn.code()*B16 | rl);

@@ -919,7 +868,7 @@ void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
   // Unindexed addressing is not encoded by this function.
   ASSERT_EQ((B27 | B26),
-            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
   int am = x.am_;
   int offset_8 = x.offset_;
@@ -982,7 +931,7 @@ void Assembler::b(int branch_offset, Condition cond) {
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

   if (cond == al) {
     // Dead code is a good location to emit the constant pool.

@@ -996,7 +945,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
 }

@@ -1006,21 +955,21 @@ void Assembler::blx(int branch_offset) {  // v5 and above
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
-  emit(nv | B27 | B25 | h | (imm24 & Imm24Mask));
+  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
 }

 void Assembler::blx(Register target, Condition cond) {  // v5 and above
   positions_recorder()->WriteRecordedPositions();
   ASSERT(!target.is(pc));
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }

 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
   positions_recorder()->WriteRecordedPositions();
   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }

@@ -1028,31 +977,31 @@ void Assembler::bx(Register target, Condition cond) {
 void Assembler::and_(Register dst, Register src1, const Operand& src2,
                      SBit s, Condition cond) {
-  addrmod1(cond | 0*B21 | s, src1, dst, src2);
+  addrmod1(cond | AND | s, src1, dst, src2);
 }

 void Assembler::eor(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 1*B21 | s, src1, dst, src2);
+  addrmod1(cond | EOR | s, src1, dst, src2);
 }

 void Assembler::sub(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 2*B21 | s, src1, dst, src2);
+  addrmod1(cond | SUB | s, src1, dst, src2);
 }

 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 3*B21 | s, src1, dst, src2);
+  addrmod1(cond | RSB | s, src1, dst, src2);
 }

 void Assembler::add(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 4*B21 | s, src1, dst, src2);
+  addrmod1(cond | ADD | s, src1, dst, src2);

   // Eliminate pattern: push(r), pop()
   //   str(src, MemOperand(sp, 4, NegPreIndex), al);

@@ -1061,7 +1010,7 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
   if (can_peephole_optimize(2) &&
       // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
     if (FLAG_print_peephole_optimization) {
       PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
@@ -1072,45 +1021,45 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,

 void Assembler::adc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 5*B21 | s, src1, dst, src2);
+  addrmod1(cond | ADC | s, src1, dst, src2);
 }

 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 6*B21 | s, src1, dst, src2);
+  addrmod1(cond | SBC | s, src1, dst, src2);
 }

 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 7*B21 | s, src1, dst, src2);
+  addrmod1(cond | RSC | s, src1, dst, src2);
 }

 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 8*B21 | S, src1, r0, src2);
+  addrmod1(cond | TST | S, src1, r0, src2);
 }

 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 9*B21 | S, src1, r0, src2);
+  addrmod1(cond | TEQ | S, src1, r0, src2);
 }

 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 10*B21 | S, src1, r0, src2);
+  addrmod1(cond | CMP | S, src1, r0, src2);
 }

 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 11*B21 | S, src1, r0, src2);
+  addrmod1(cond | CMN | S, src1, r0, src2);
 }

 void Assembler::orr(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 12*B21 | s, src1, dst, src2);
+  addrmod1(cond | ORR | s, src1, dst, src2);
 }

@@ -1122,7 +1071,7 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
-  addrmod1(cond | 13*B21 | s, r0, dst, src);
+  addrmod1(cond | MOV | s, r0, dst, src);
 }

@@ -1139,12 +1088,12 @@ void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {

 void Assembler::bic(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
-  addrmod1(cond | 14*B21 | s, src1, dst, src2);
+  addrmod1(cond | BIC | s, src1, dst, src2);
 }

 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
-  addrmod1(cond | 15*B21 | s, r0, dst, src);
+  addrmod1(cond | MVN | s, r0, dst, src);
 }

@@ -1222,7 +1171,7 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
   ASSERT(!dst.is(pc) && !src.is(pc));
   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
-       15*B8 | B4 | src.code());
+       15*B8 | CLZ | src.code());
 }
@@ -1376,7 +1325,7 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
     Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);

     if (IsPush(push_instr) && IsPop(pop_instr)) {
-      if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+      if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
         // For consecutive push and pop on different registers,
         // we delete both the push & pop and insert a register move.
         // push ry, pop rx --> mov rx, ry

@@ -1457,8 +1406,8 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
         IsPop(mem_read_instr)) {
       if ((IsLdrRegFpOffset(ldr_instr) ||
            IsLdrRegFpNegOffset(ldr_instr))) {
-        if ((mem_write_instr & kRdMask) ==
-            (mem_read_instr & kRdMask)) {
+        if (Instruction::RdValue(mem_write_instr) ==
+            Instruction::RdValue(mem_read_instr)) {
           // Pattern: push & pop from/to same register,
           // with a fp+offset ldr in between
           //

@@ -1473,7 +1422,8 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
          //   else
           //     ldr rz, [fp, #-24]

-          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+          if (Instruction::RdValue(mem_write_instr) ==
+              Instruction::RdValue(ldr_instr)) {
             pc_ -= 3 * kInstrSize;
           } else {
             pc_ -= 3 * kInstrSize;

@@ -1503,22 +1453,23 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
           //     ldr rz, [fp, #-24]

           Register reg_pushed, reg_popped;
-          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+          if (Instruction::RdValue(mem_read_instr) ==
+              Instruction::RdValue(ldr_instr)) {
             reg_pushed = GetRd(mem_write_instr);
             reg_popped = GetRd(mem_read_instr);
             pc_ -= 3 * kInstrSize;
             mov(reg_popped, reg_pushed);
-          } else if ((mem_write_instr & kRdMask)
-                     != (ldr_instr & kRdMask)) {
+          } else if (Instruction::RdValue(mem_write_instr) !=
+                     Instruction::RdValue(ldr_instr)) {
             reg_pushed = GetRd(mem_write_instr);
             reg_popped = GetRd(mem_read_instr);
             pc_ -= 3 * kInstrSize;
             emit(ldr_instr);
             mov(reg_popped, reg_pushed);
-          } else if (((mem_read_instr & kRdMask)
-                      != (ldr_instr & kRdMask)) ||
-                     ((mem_write_instr & kRdMask)
-                      == (ldr_instr & kRdMask)) ) {
+          } else if ((Instruction::RdValue(mem_read_instr) !=
+                      Instruction::RdValue(ldr_instr)) ||
+                     (Instruction::RdValue(mem_write_instr) ==
+                      Instruction::RdValue(ldr_instr))) {
             reg_pushed = GetRd(mem_write_instr);
             reg_popped = GetRd(mem_read_instr);
             pc_ -= 3 * kInstrSize;
@@ -1640,18 +1591,14 @@ void Assembler::stm(BlockAddrMode am,
 // enabling/disabling and a counter feature. See simulator-arm.h .
 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
-  // See constants-arm.h SoftwareInterruptCodes. Unluckily the Assembler and
-  // Simulator do not share constants declaration.
   ASSERT(code >= kDefaultStopCode);
-  static const uint32_t kStopInterruptCode = 1 << 23;
-  static const uint32_t kMaxStopCode = kStopInterruptCode - 1;
   // The Simulator will handle the stop instruction and get the message address.
   // It expects to find the address just after the svc instruction.
   BlockConstPoolFor(2);
   if (code >= 0) {
-    svc(kStopInterruptCode + code, cond);
+    svc(kStopCode + code, cond);
   } else {
-    svc(kStopInterruptCode + kMaxStopCode, cond);
+    svc(kStopCode + kMaxStopCode, cond);
   }
   emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__

@@ -1673,7 +1620,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 void Assembler::bkpt(uint32_t imm16) {  // v5 and above
   ASSERT(is_uint16(imm16));
-  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
 }

@@ -1703,7 +1650,7 @@ void Assembler::cdp2(Coprocessor coproc,
                      CRegister crn,
                      CRegister crm,
                      int opcode_2) {  // v5 and above
-  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
 }

@@ -1726,7 +1673,7 @@ void Assembler::mcr2(Coprocessor coproc,
                      CRegister crn,
                      CRegister crm,
                      int opcode_2) {  // v5 and above
-  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }

@@ -1749,7 +1696,7 @@ void Assembler::mrc2(Coprocessor coproc,
                      CRegister crn,
                      CRegister crm,
                      int opcode_2) {  // v5 and above
-  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
 }

@@ -1779,7 +1726,7 @@ void Assembler::ldc2(Coprocessor coproc,
                      CRegister crd,
                      const MemOperand& src,
                      LFlag l) {  // v5 and above
-  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+  ldc(coproc, crd, src, l, kSpecialCondition);
 }

@@ -1788,7 +1735,7 @@ void Assembler::ldc2(Coprocessor coproc,
                      Register rn,
                      int option,
                      LFlag l) {  // v5 and above
-  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+  ldc(coproc, crd, rn, option, l, kSpecialCondition);
 }

@@ -1818,7 +1765,7 @@ void Assembler::stc2(Coprocessor coproc,
                      CRegister crd,
                      const MemOperand& dst,
                      LFlag l) {  // v5 and above
-  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+  stc(coproc, crd, dst, l, kSpecialCondition);
 }

@@ -1827,7 +1774,7 @@ void Assembler::stc2(Coprocessor coproc,
                      Register rn,
                      int option,
                      LFlag l) {  // v5 and above
-  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+  stc(coproc, crd, rn, option, l, kSpecialCondition);
 }

@@ -2637,7 +2584,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       // Instruction to patch must be a ldr/str [pc, #offset].
       // P and U set, B and W clear, Rn == pc, offset12 still 0.
-      ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+      ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
              (2*B25 | P | U | pc.code()*B16));
       int delta = pc_ - rinfo.pc() - 8;
       ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32

168
deps/v8/src/arm/assembler-arm.h

@@ -41,6 +41,7 @@
 #define V8_ARM_ASSEMBLER_ARM_H_

 #include <stdio.h>
 #include "assembler.h"
+#include "constants-arm.h"
 #include "serialize.h"

 namespace v8 {

@@ -300,18 +301,6 @@ const DwVfpRegister d13 = { 13 };
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };

-// VFP FPSCR constants.
-static const uint32_t kVFPNConditionFlagBit = 1 << 31;
-static const uint32_t kVFPZConditionFlagBit = 1 << 30;
-static const uint32_t kVFPCConditionFlagBit = 1 << 29;
-static const uint32_t kVFPVConditionFlagBit = 1 << 28;
-static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPRoundingModeMask = 3 << 22;
-static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
-static const uint32_t kVFPExceptionMask = 0xf;
-
 // Coprocessor register
 struct CRegister {

@@ -372,149 +361,6 @@ enum Coprocessor {
 };

-// Condition field in instructions.
-enum Condition {
-  // any value < 0 is considered no_condition
-  no_condition = -1,
-
-  eq =  0 << 28,  // Z set            equal.
-  ne =  1 << 28,  // Z clear          not equal.
-  nz =  1 << 28,  // Z clear          not zero.
-  cs =  2 << 28,  // C set            carry set.
-  hs =  2 << 28,  // C set            unsigned higher or same.
-  cc =  3 << 28,  // C clear          carry clear.
-  lo =  3 << 28,  // C clear          unsigned lower.
-  mi =  4 << 28,  // N set            negative.
-  pl =  5 << 28,  // N clear          positive or zero.
-  vs =  6 << 28,  // V set            overflow.
-  vc =  7 << 28,  // V clear          no overflow.
-  hi =  8 << 28,  // C set, Z clear   unsigned higher.
-  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
-  ge = 10 << 28,  // N == V           greater or equal.
-  lt = 11 << 28,  // N != V           less than.
-  gt = 12 << 28,  // Z clear, N == V  greater than.
-  le = 13 << 28,  // Z set or N != V  less then or equal
-  al = 14 << 28   // always.
-};
-
-// Returns the equivalent of !cc.
-inline Condition NegateCondition(Condition cc) {
-  ASSERT(cc != al);
-  return static_cast<Condition>(cc ^ ne);
-}
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
-  switch (cc) {
-    case lo:
-      return hi;
-    case hi:
-      return lo;
-    case hs:
-      return ls;
-    case ls:
-      return hs;
-    case lt:
-      return gt;
-    case gt:
-      return lt;
-    case ge:
-      return le;
-    case le:
-      return ge;
-    default:
-      return cc;
-  };
-}
-
-// Branch hints are not used on the ARM. They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the arm. Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
-  LSL = 0 << 5,
-  LSR = 1 << 5,
-  ASR = 2 << 5,
-  ROR = 3 << 5,
-  RRX = -1
-};
-
-// Condition code updating mode
-enum SBit {
-  SetCC   = 1 << 20,  // set condition code
-  LeaveCC = 0 << 20   // leave condition code unchanged
-};
-
-// Status register selection
-enum SRegister {
-  CPSR = 0 << 22,
-  SPSR = 1 << 22
-};
-
-// Status register fields
-enum SRegisterField {
-  CPSR_c = CPSR | 1 << 16,
-  CPSR_x = CPSR | 1 << 17,
-  CPSR_s = CPSR | 1 << 18,
-  CPSR_f = CPSR | 1 << 19,
-  SPSR_c = SPSR | 1 << 16,
-  SPSR_x = SPSR | 1 << 17,
-  SPSR_s = SPSR | 1 << 18,
-  SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-// Memory operand addressing mode
-enum AddrMode {
-  // bit encoding P U W
-  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
-  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
-  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
-  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
-  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
-  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
-};
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
-  // bit encoding P U W
-  da   = (0|0|0) << 21,  // decrement after
-  ia   = (0|4|0) << 21,  // increment after
-  db   = (8|0|0) << 21,  // decrement before
-  ib   = (8|4|0) << 21,  // increment before
-  da_w = (0|0|1) << 21,  // decrement after with writeback to base
-  ia_w = (0|4|1) << 21,  // increment after with writeback to base
-  db_w = (8|0|1) << 21,  // decrement before with writeback to base
-  ib_w = (8|4|1) << 21   // increment before with writeback to base
-};
-
-// Coprocessor load/store operand size
-enum LFlag {
-  Long  = 1 << 22,  // long load/store coprocessor
-  Short = 0 << 22   // short load/store coprocessor
-};
-
 // -----------------------------------------------------------------------------
 // Machine instruction Operands

@@ -658,9 +504,6 @@ class CpuFeatures : public AllStatic {
 };

-typedef int32_t Instr;
-
 extern const Instr kMovLrPc;
 extern const Instr kLdrPCMask;
 extern const Instr kLdrPCPattern;

@@ -680,15 +523,11 @@ extern const Instr kMovwLeaveCCFlip;
 extern const Instr kCmpCmnMask;
 extern const Instr kCmpCmnPattern;
 extern const Instr kCmpCmnFlip;
-extern const Instr kALUMask;
-extern const Instr kAddPattern;
-extern const Instr kSubPattern;
-extern const Instr kAndPattern;
-extern const Instr kBicPattern;
 extern const Instr kAddSubFlip;
 extern const Instr kAndBicFlip;

 class Assembler : public Malloced {
  public:
   // Create an assembler. Instructions and relocation information are emitted

@@ -1001,7 +840,6 @@ class Assembler : public Malloced {
   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);

   // Exception-generating instructions and debugging support
-  static const int kDefaultStopCode = -1;
   void stop(const char* msg,
             Condition cond = al,
             int32_t code = kDefaultStopCode);

6
deps/v8/src/arm/builtins-arm.cc

@@ -190,7 +190,7 @@ static void AllocateJSArray(MacroAssembler* masm,
   // Check whether an empty sized array is requested.
   __ tst(array_size, array_size);
-  __ b(nz, &not_empty);
+  __ b(ne, &not_empty);

   // If an empty array is requested allocate a small elements array anyway. This
   // keeps the code below free of special casing for the empty array.

@@ -566,7 +566,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // if it's a string already before calling the conversion builtin.
   Label convert_argument;
   __ bind(&not_cached);
-  __ BranchOnSmi(r0, &convert_argument);
+  __ JumpIfSmi(r0, &convert_argument);

   // Is it a String?
   __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

@@ -666,7 +666,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ mov(r2, Operand(debug_step_in_fp));
     __ ldr(r2, MemOperand(r2));
     __ tst(r2, r2);
-    __ b(nz, &rt_call);
+    __ b(ne, &rt_call);
 #endif

     // Load the initial map and verify that it is in fact a map.

748
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

111
deps/v8/src/arm/code-stubs-arm.h

@@ -218,6 +218,117 @@ class GenericBinaryOpStub : public CodeStub {
 };

+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_vfp3_ = CpuFeatures::IsSupported(VFP3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  TypeRecordingBinaryOpStub(
+      int key,
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        use_vfp3_(VFP3Bits::decode(key)),
+        operands_type_(operands_type),
+        result_type_(result_type),
+        name_(NULL) { }
+
+ private:
+  enum SmiCodeGenerateHeapNumberResults {
+    ALLOW_HEAPNUMBER_RESULTS,
+    NO_HEAPNUMBER_RESULTS
+  };
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  bool use_vfp3_;
+
+  // Operand type information determined at runtime.
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           TRBinaryOpIC::GetName(operands_type_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class VFP3Bits: public BitField<bool, 9, 1> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+  Major MajorKey() { return TypeRecordingBinaryOp; }
+  int MinorKey() {
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | VFP3Bits::encode(use_vfp3_)
+           | OperandTypeInfoBits::encode(operands_type_)
+           | ResultTypeInfoBits::encode(result_type_);
+  }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);
+  void GenerateSmiSmiOperation(MacroAssembler* masm);
+  void GenerateVFPOperation(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm,
+                       Label* gc_required,
+                       SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+  void GenerateUninitializedStub(MacroAssembler* masm);
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateInt32Stub(MacroAssembler* masm);
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateStringStub(MacroAssembler* masm);
+  void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateAddStrings(MacroAssembler* masm);
+  void GenerateCallRuntime(MacroAssembler* masm);
+
+  void GenerateHeapResultAllocation(MacroAssembler* masm,
+                                    Register result,
+                                    Register heap_number_map,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    Label* gc_required);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
+  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return TRBinaryOpIC::ToState(operands_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
+  }
+
+  friend class CodeGenerator;
+};
+
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
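The "RRRTTTVOOOOOOOMM" minor key in the class above is plain BitField packing: each field owns a (shift, size) slice of the 16-bit key, and encode/decode just shift and mask. A rough stand-in for the BitField template (the real one lives elsewhere in the V8 tree; this mirrors only the encode/decode contract the stub relies on, with plain int standing in for the enum types):

    #include <cstdint>

    // Minimal BitField look-alike: a value of type T stored at bits [shift, shift+size).
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T v) { return static_cast<uint32_t>(v) << shift; }
      static T decode(uint32_t f) { return static_cast<T>((f & kMask) >> shift); }
    };

    // Same slices as the stub: mode in bits 0-1, op in bits 2-8, VFP3 in bit 9.
    struct ModeBits : BitField<int, 0, 2> {};
    struct OpBits   : BitField<int, 2, 7> {};
    struct VFP3Bits : BitField<bool, 9, 1> {};

    int main() {
      const uint32_t key =
          OpBits::encode(42) | ModeBits::encode(1) | VFP3Bits::encode(true);
      return (OpBits::decode(key) == 42 && ModeBits::decode(key) == 1 &&
              VFP3Bits::decode(key)) ? 0 : 1;
    }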

2
deps/v8/src/arm/codegen-arm-inl.h

@@ -39,7 +39,7 @@ namespace internal {
 // Platform-specific inline functions.

 void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }

 #undef __

40
deps/v8/src/arm/codegen-arm.cc

@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AllocateStackSlots(); frame_->AllocateStackSlots();
frame_->AssertIsSpilled(); frame_->AssertIsSpilled();
int heap_slots = scope()->num_heap_slots(); int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) { if (heap_slots > 0) {
// Allocate local context. // Allocate local context.
// Get outer context and create a new context based on it. // Get outer context and create a new context based on it.
@ -1589,7 +1589,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
} }
void CodeGenerator::Comparison(Condition cc, void CodeGenerator::Comparison(Condition cond,
Expression* left, Expression* left,
Expression* right, Expression* right,
bool strict) { bool strict) {
@ -1603,7 +1603,7 @@ void CodeGenerator::Comparison(Condition cc,
// result : cc register // result : cc register
// Strict only makes sense for equality comparisons. // Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == eq); ASSERT(!strict || cond == eq);
Register lhs; Register lhs;
Register rhs; Register rhs;
@ -1614,8 +1614,8 @@ void CodeGenerator::Comparison(Condition cc,
// We load the top two stack positions into registers chosen by the virtual // We load the top two stack positions into registers chosen by the virtual
// frame. This should keep the register shuffling to a minimum. // frame. This should keep the register shuffling to a minimum.
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == gt || cc == le) { if (cond == gt || cond == le) {
cc = ReverseCondition(cc); cond = ReverseCondition(cond);
lhs_is_smi = frame_->KnownSmiAt(0); lhs_is_smi = frame_->KnownSmiAt(0);
rhs_is_smi = frame_->KnownSmiAt(1); rhs_is_smi = frame_->KnownSmiAt(1);
lhs = frame_->PopToRegister(); lhs = frame_->PopToRegister();
@ -1655,7 +1655,7 @@ void CodeGenerator::Comparison(Condition cc,
// Perform non-smi comparison by stub. // Perform non-smi comparison by stub.
// CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
// We call with 0 args because there are 0 on the stack. // We call with 0 args because there are 0 on the stack.
CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs); CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
frame_->CallStub(&stub, 0); frame_->CallStub(&stub, 0);
__ cmp(r0, Operand(0, RelocInfo::NONE)); __ cmp(r0, Operand(0, RelocInfo::NONE));
exit.Jump(); exit.Jump();
@ -1667,7 +1667,7 @@ void CodeGenerator::Comparison(Condition cc,
__ cmp(lhs, Operand(rhs)); __ cmp(lhs, Operand(rhs));
exit.Bind(); exit.Bind();
cc_reg_ = cc; cc_reg_ = cond;
} }
@ -1762,7 +1762,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// sp[2]: applicand. // sp[2]: applicand.
// Check that the receiver really is a JavaScript object. // Check that the receiver really is a JavaScript object.
__ BranchOnSmi(receiver_reg, &build_args); __ JumpIfSmi(receiver_reg, &build_args);
// We allow all JSObjects including JSFunctions. As long as // We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right // JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
@ -1774,7 +1774,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Check that applicand.apply is Function.prototype.apply. // Check that applicand.apply is Function.prototype.apply.
__ ldr(r0, MemOperand(sp, kPointerSize)); __ ldr(r0, MemOperand(sp, kPointerSize));
__ BranchOnSmi(r0, &build_args); __ JumpIfSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE); __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args); __ b(ne, &build_args);
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply)); Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
@ -1785,7 +1785,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Check that applicand is a function. // Check that applicand is a function.
__ ldr(r1, MemOperand(sp, 2 * kPointerSize)); __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ BranchOnSmi(r1, &build_args); __ JumpIfSmi(r1, &build_args);
__ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE); __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
__ b(ne, &build_args); __ b(ne, &build_args);
@ -1885,8 +1885,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
void CodeGenerator::Branch(bool if_true, JumpTarget* target) { void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
ASSERT(has_cc()); ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
target->Branch(cc); target->Branch(cond);
cc_reg_ = al; cc_reg_ = al;
} }
@ -4618,8 +4618,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
ASSERT(runtime.entry_frame() == NULL); ASSERT(runtime.entry_frame() == NULL);
runtime.set_entry_frame(frame_); runtime.set_entry_frame(frame_);
__ BranchOnNotSmi(exponent, &exponent_nonsmi); __ JumpIfNotSmi(exponent, &exponent_nonsmi);
__ BranchOnNotSmi(base, &base_nonsmi); __ JumpIfNotSmi(base, &base_nonsmi);
heap_number_map = r6; heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
@ -5572,7 +5572,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
deferred->Branch(lt); deferred->Branch(lt);
__ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset)); __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
__ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
deferred->Branch(nz); deferred->Branch(ne);
// Check the object's elements are in fast case and writable. // Check the object's elements are in fast case and writable.
__ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset)); __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
@ -5589,7 +5589,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ mov(tmp2, index1); __ mov(tmp2, index1);
__ orr(tmp2, tmp2, index2); __ orr(tmp2, tmp2, index2);
__ tst(tmp2, Operand(kSmiTagMask)); __ tst(tmp2, Operand(kSmiTagMask));
deferred->Branch(nz); deferred->Branch(ne);
// Check that both indices are valid. // Check that both indices are valid.
__ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset)); __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
@ -5849,14 +5849,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) { } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable // Delete from the context holding the named variable.
frame_->EmitPush(cp); frame_->EmitPush(cp);
frame_->EmitPush(Operand(variable->name())); frame_->EmitPush(Operand(variable->name()));
frame_->CallRuntime(Runtime::kLookupContext, 2); frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
// r0: context
frame_->EmitPush(r0);
frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
frame_->EmitPush(r0); frame_->EmitPush(r0);
} else { } else {

10
deps/v8/src/arm/constants-arm.cc

@ -32,12 +32,10 @@
#include "constants-arm.h" #include "constants-arm.h"
namespace assembler { namespace v8 {
namespace arm { namespace internal {
namespace v8i = v8::internal; double Instruction::DoubleImmedVmov() const {
double Instr::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction. // Reconstruct a double from the immediate encoded in the vmov instruction.
// //
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh] // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@ -149,6 +147,6 @@ int Registers::Number(const char* name) {
} }
} } // namespace assembler::arm } } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM #endif // V8_TARGET_ARCH_ARM

588
deps/v8/src/arm/constants-arm.h

@ -86,8 +86,8 @@
#define USE_BLX 1 #define USE_BLX 1
#endif #endif
namespace assembler { namespace v8 {
namespace arm { namespace internal {
// Number of registers in normal ARM mode. // Number of registers in normal ARM mode.
static const int kNumRegisters = 16; static const int kNumRegisters = 16;
@ -102,6 +102,9 @@ static const int kNumVFPRegisters =
static const int kPCRegister = 15; static const int kPCRegister = 15;
static const int kNoRegister = -1; static const int kNoRegister = -1;
// -----------------------------------------------------------------------------
// Conditions.
// Defines constants and accessor classes to assemble, disassemble and // Defines constants and accessor classes to assemble, disassemble and
// simulate ARM instructions. // simulate ARM instructions.
// //
@ -111,78 +114,246 @@ static const int kNoRegister = -1;
// Constants for specific fields are defined in their respective named enums. // Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr. // General constants are in an anonymous enum in class Instr.
typedef unsigned char byte;
// Values for the condition field as defined in section A3.2 // Values for the condition field as defined in section A3.2
enum Condition { enum Condition {
no_condition = -1, kNoCondition = -1,
EQ = 0, // equal
NE = 1, // not equal eq = 0 << 28, // Z set Equal.
CS = 2, // carry set/unsigned higher or same ne = 1 << 28, // Z clear Not equal.
CC = 3, // carry clear/unsigned lower cs = 2 << 28, // C set Unsigned higher or same.
MI = 4, // minus/negative cc = 3 << 28, // C clear Unsigned lower.
PL = 5, // plus/positive or zero mi = 4 << 28, // N set Negative.
VS = 6, // overflow pl = 5 << 28, // N clear Positive or zero.
VC = 7, // no overflow vs = 6 << 28, // V set Overflow.
HI = 8, // unsigned higher vc = 7 << 28, // V clear No overflow.
LS = 9, // unsigned lower or same hi = 8 << 28, // C set, Z clear Unsigned higher.
GE = 10, // signed greater than or equal ls = 9 << 28, // C clear or Z set Unsigned lower or same.
LT = 11, // signed less than ge = 10 << 28, // N == V Greater or equal.
GT = 12, // signed greater than lt = 11 << 28, // N != V Less than.
LE = 13, // signed less than or equal gt = 12 << 28, // Z clear, N == V Greater than.
AL = 14, // always (unconditional) le = 13 << 28, // Z set or N != V Less then or equal
special_condition = 15, // special condition (refer to section A3.2.1) al = 14 << 28, // Always.
max_condition = 16
kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
kNumberOfConditions = 16,
// Aliases.
hs = cs, // C set Unsigned higher or same.
lo = cc // C clear Unsigned lower.
}; };
inline Condition NegateCondition(Condition cond) {
ASSERT(cond != al);
return static_cast<Condition>(cond ^ ne);
}
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cond) {
switch (cond) {
case lo:
return hi;
case hi:
return lo;
case hs:
return ls;
case ls:
return hs;
case lt:
return gt;
case gt:
return lt;
case ge:
return le;
case le:
return ge;
default:
return cond;
};
}
// -----------------------------------------------------------------------------
// Instructions encoding.
// Instr is merely used by the Assembler to distinguish 32bit integers
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
typedef int32_t Instr;
// Opcodes for Data-processing instructions (instructions with a type 0 and 1) // Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4 // as defined in section A3.4
enum Opcode { enum Opcode {
no_operand = -1, AND = 0 << 21, // Logical AND.
AND = 0, // Logical AND EOR = 1 << 21, // Logical Exclusive OR.
EOR = 1, // Logical Exclusive OR SUB = 2 << 21, // Subtract.
SUB = 2, // Subtract RSB = 3 << 21, // Reverse Subtract.
RSB = 3, // Reverse Subtract ADD = 4 << 21, // Add.
ADD = 4, // Add ADC = 5 << 21, // Add with Carry.
ADC = 5, // Add with Carry SBC = 6 << 21, // Subtract with Carry.
SBC = 6, // Subtract with Carry RSC = 7 << 21, // Reverse Subtract with Carry.
RSC = 7, // Reverse Subtract with Carry TST = 8 << 21, // Test.
TST = 8, // Test TEQ = 9 << 21, // Test Equivalence.
TEQ = 9, // Test Equivalence CMP = 10 << 21, // Compare.
CMP = 10, // Compare CMN = 11 << 21, // Compare Negated.
CMN = 11, // Compare Negated ORR = 12 << 21, // Logical (inclusive) OR.
ORR = 12, // Logical (inclusive) OR MOV = 13 << 21, // Move.
MOV = 13, // Move BIC = 14 << 21, // Bit Clear.
BIC = 14, // Bit Clear MVN = 15 << 21 // Move Not.
MVN = 15, // Move Not
max_operand = 16
}; };
// The bits for bit 7-4 for some type 0 miscellaneous instructions. // The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 { enum MiscInstructionsBits74 {
// With bits 22-21 01. // With bits 22-21 01.
BX = 1, BX = 1 << 4,
BXJ = 2, BXJ = 2 << 4,
BLX = 3, BLX = 3 << 4,
BKPT = 7, BKPT = 7 << 4,
// With bits 22-21 11. // With bits 22-21 11.
CLZ = 1 CLZ = 1 << 4
};
// Instruction encoding bits and masks.
enum {
H = 1 << 5, // Halfword (or byte).
S6 = 1 << 6, // Signed (or unsigned).
L = 1 << 20, // Load (or store).
S = 1 << 20, // Set condition code (or leave unchanged).
W = 1 << 21, // Writeback base register (or leave unchanged).
A = 1 << 21, // Accumulate in multiply instruction (or not).
B = 1 << 22, // Unsigned byte (or word).
N = 1 << 22, // Long (or short).
U = 1 << 23, // Positive (or negative) offset/index.
P = 1 << 24, // Offset/pre-indexed addressing (or post-indexed addressing).
I = 1 << 25, // Immediate shifter operand (or not).
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
B18 = 1 << 18,
B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
B23 = 1 << 23,
B24 = 1 << 24,
B25 = 1 << 25,
B26 = 1 << 26,
B27 = 1 << 27,
B28 = 1 << 28,
// Instruction bit masks.
kCondMask = 15 << 28,
kALUMask = 0x6f << 21,
kRdMask = 15 << 12, // In str instruction.
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
kOff12Mask = (1 << 12) - 1
};
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
// Condition code updating mode.
enum SBit {
SetCC = 1 << 20, // Set condition code.
LeaveCC = 0 << 20 // Leave condition code unchanged.
};
// Status register selection.
enum SRegister {
CPSR = 0 << 22,
SPSR = 1 << 22
}; };
// Shifter types for Data-processing operands as defined in section A5.1.2. // Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift { enum ShiftOp {
no_shift = -1, LSL = 0 << 5, // Logical shift left.
LSL = 0, // Logical shift left LSR = 1 << 5, // Logical shift right.
LSR = 1, // Logical shift right ASR = 2 << 5, // Arithmetic shift right.
ASR = 2, // Arithmetic shift right ROR = 3 << 5, // Rotate right.
ROR = 3, // Rotate right
max_shift = 4 // RRX is encoded as ROR with shift_imm == 0.
// Use a special code to make the distinction. The RRX ShiftOp is only used
// as an argument, and will never actually be encoded. The Assembler will
// detect it and emit the correct ROR shift operand with shift_imm == 0.
RRX = -1,
kNumberOfShifts = 4
};
// Status register fields.
enum SRegisterField {
CPSR_c = CPSR | 1 << 16,
CPSR_x = CPSR | 1 << 17,
CPSR_s = CPSR | 1 << 18,
CPSR_f = CPSR | 1 << 19,
SPSR_c = SPSR | 1 << 16,
SPSR_x = SPSR | 1 << 17,
SPSR_s = SPSR | 1 << 18,
SPSR_f = SPSR | 1 << 19
}; };
// Status register field mask (or'ed SRegisterField enum values).
typedef uint32_t SRegisterFieldMask;
// Memory operand addressing mode.
enum AddrMode {
// Bit encoding P U W.
Offset = (8|4|0) << 21, // Offset (without writeback to base).
PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
};
// Load/store multiple addressing mode.
enum BlockAddrMode {
// Bit encoding P U W .
da = (0|0|0) << 21, // Decrement after.
ia = (0|4|0) << 21, // Increment after.
db = (8|0|0) << 21, // Decrement before.
ib = (8|4|0) << 21, // Increment before.
da_w = (0|0|1) << 21, // Decrement after with writeback to base.
ia_w = (0|4|1) << 21, // Increment after with writeback to base.
db_w = (8|0|1) << 21, // Decrement before with writeback to base.
ib_w = (8|4|1) << 21, // Increment before with writeback to base.
// Alias modes for comparison when writeback does not matter.
da_x = (0|0|0) << 21, // Decrement after.
ia_x = (0|4|0) << 21, // Increment after.
db_x = (8|0|0) << 21, // Decrement before.
ib_x = (8|4|0) << 21 // Increment before.
};
// Coprocessor load/store operand size.
enum LFlag {
Long = 1 << 22, // Long load/store coprocessor.
Short = 0 << 22 // Short load/store coprocessor.
};
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
// Special Software Interrupt codes when used in the presence of the ARM // Special Software Interrupt codes when used in the presence of the ARM
// simulator. // simulator.
@ -190,14 +361,15 @@ enum Shift {
// standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature. // standard SoftwareInterrupCode. Bit 23 is reserved for the stop feature.
enum SoftwareInterruptCodes { enum SoftwareInterruptCodes {
// transition to C code // transition to C code
call_rt_redirected = 0x10, kCallRtRedirected= 0x10,
// break point // break point
break_point = 0x20, kBreakpoint= 0x20,
// stop // stop
stop = 1 << 23 kStopCode = 1 << 23
}; };
static const int32_t kStopCodeMask = stop - 1; static const uint32_t kStopCodeMask = kStopCode - 1;
static const uint32_t kMaxStopCode = stop - 1; static const uint32_t kMaxStopCode = kStopCode - 1;
static const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding. // Type of VFP register. Determines register encoding.
@ -206,6 +378,20 @@ enum VFPRegPrecision {
kDoublePrecision = 1 kDoublePrecision = 1
}; };
// VFP FPSCR constants.
static const uint32_t kVFPExceptionMask = 0xf;
static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
static const uint32_t kVFPCConditionFlagBit = 1 << 29;
static const uint32_t kVFPVConditionFlagBit = 1 << 28;
// VFP rounding modes. See ARM DDI 0406B Page A2-29. // VFP rounding modes. See ARM DDI 0406B Page A2-29.
enum FPSCRRoundingModes { enum FPSCRRoundingModes {
RN, // Round to Nearest. RN, // Round to Nearest.
@ -214,22 +400,91 @@ enum FPSCRRoundingModes {
RZ // Round towards zero. RZ // Round towards zero.
}; };
typedef int32_t instr_t;
// -----------------------------------------------------------------------------
// Hints.
// Branch hints are not used on the ARM. They are defined so that they can
// appear in shared function signatures, but will be ignored in ARM
// implementations.
enum Hint { no_hint };
// Hints are not used on the arm. Negating is trivial.
inline Hint NegateHint(Hint ignored) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
// These constants are declared in assembler-arm.cc, as they use named registers
// and other constants.
// add(sp, sp, 4) instruction (aka Pop())
extern const Instr kPopInstruction;
// The class Instr enables access to individual fields defined in the ARM // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
extern const Instr kPushRegPattern;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
extern const Instr kPopRegPattern;
// mov lr, pc
extern const Instr kMovLrPc;
// ldr rd, [pc, #offset]
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
// blxcc rm
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kMovMvnMask;
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
extern const Instr kMovLeaveCCMask;
extern const Instr kMovLeaveCCPattern;
extern const Instr kMovwMask;
extern const Instr kMovwPattern;
extern const Instr kMovwLeaveCCFlip;
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
// A mask for the Rd register for push, pop, ldr, str instructions.
extern const Instr kLdrRegFpOffsetPattern;
extern const Instr kStrRegFpOffsetPattern;
extern const Instr kLdrRegFpNegOffsetPattern;
extern const Instr kStrRegFpNegOffsetPattern;
extern const Instr kLdrStrInstrTypeMask;
extern const Instr kLdrStrInstrArgumentMask;
extern const Instr kLdrStrOffsetMask;
// -----------------------------------------------------------------------------
// Instruction abstraction.
// The class Instruction enables access to individual fields defined in the ARM
// architecture instruction set encoding as described in figure A3-1. // architecture instruction set encoding as described in figure A3-1.
// Note that the Assembler uses typedef int32_t Instr.
// //
// Example: Test whether the instruction at ptr does set the condition code // Example: Test whether the instruction at ptr does set the condition code
// bits. // bits.
// //
// bool InstructionSetsConditionCodes(byte* ptr) { // bool InstructionSetsConditionCodes(byte* ptr) {
// Instr* instr = Instr::At(ptr); // Instruction* instr = Instruction::At(ptr);
// int type = instr->TypeField(); // int type = instr->TypeValue();
// return ((type == 0) || (type == 1)) && instr->HasS(); // return ((type == 0) || (type == 1)) && instr->HasS();
// } // }
// //
class Instr { class Instruction {
public: public:
enum { enum {
kInstrSize = 4, kInstrSize = 4,
@ -237,14 +492,24 @@ class Instr {
kPCReadOffset = 8 kPCReadOffset = 8
}; };
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
static inline return_type Name(Instr instr) { \
char* temp = reinterpret_cast<char*>(&instr); \
return reinterpret_cast<Instruction*>(temp)->Name(); \
}
#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
// Get the raw instruction bits. // Get the raw instruction bits.
inline instr_t InstructionBits() const { inline Instr InstructionBits() const {
return *reinterpret_cast<const instr_t*>(this); return *reinterpret_cast<const Instr*>(this);
} }
// Set the raw instruction bits to value. // Set the raw instruction bits to value.
inline void SetInstructionBits(instr_t value) { inline void SetInstructionBits(Instr value) {
*reinterpret_cast<instr_t*>(this) = value; *reinterpret_cast<Instr*>(this) = value;
} }
// Read one particular bit out of the instruction bits. // Read one particular bit out of the instruction bits.
@ -252,93 +517,141 @@ class Instr {
return (InstructionBits() >> nr) & 1; return (InstructionBits() >> nr) & 1;
} }
// Read a bit field out of the instruction bits. // Read a bit field's value out of the instruction bits.
inline int Bits(int hi, int lo) const { inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1); return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
} }
// Read a bit field out of the instruction bits.
inline int BitField(int hi, int lo) const {
return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
}
// Static support.
// Read one particular bit out of the instruction bits.
static inline int Bit(Instr instr, int nr) {
return (instr >> nr) & 1;
}
// Read the value of a bit field out of the instruction bits.
static inline int Bits(Instr instr, int hi, int lo) {
return (instr >> lo) & ((2 << (hi - lo)) - 1);
}
// Read a bit field out of the instruction bits.
static inline int BitField(Instr instr, int hi, int lo) {
return instr & (((2 << (hi - lo)) - 1) << lo);
}
// Accessors for the different named fields used in the ARM encoding. // Accessors for the different named fields used in the ARM encoding.
// The naming of these accessor corresponds to figure A3-1. // The naming of these accessor corresponds to figure A3-1.
//
// Two kind of accessors are declared:
// - <Name>Field() will return the raw field, ie the field's bits at their
// original place in the instruction encoding.
// eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
// ConditionField(instr) will return 0xC0000000.
// - <Name>Value() will return the field value, shifted back to bit 0.
// eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
// ConditionField(instr) will return 0xC.
// Generally applicable fields // Generally applicable fields
inline Condition ConditionField() const { inline Condition ConditionValue() const {
return static_cast<Condition>(Bits(31, 28)); return static_cast<Condition>(Bits(31, 28));
} }
inline int TypeField() const { return Bits(27, 25); } inline Condition ConditionField() const {
return static_cast<Condition>(BitField(31, 28));
}
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionValue);
DECLARE_STATIC_TYPED_ACCESSOR(Condition, ConditionField);
inline int RnField() const { return Bits(19, 16); } inline int TypeValue() const { return Bits(27, 25); }
inline int RdField() const { return Bits(15, 12); }
inline int CoprocessorField() const { return Bits(11, 8); } inline int RnValue() const { return Bits(19, 16); }
inline int RdValue() const { return Bits(15, 12); }
DECLARE_STATIC_ACCESSOR(RdValue);
inline int CoprocessorValue() const { return Bits(11, 8); }
// Support for VFP. // Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0) // Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); } inline int VnValue() const { return Bits(19, 16); }
inline int VmField() const { return Bits(3, 0); } inline int VmValue() const { return Bits(3, 0); }
inline int VdField() const { return Bits(15, 12); } inline int VdValue() const { return Bits(15, 12); }
inline int NField() const { return Bit(7); } inline int NValue() const { return Bit(7); }
inline int MField() const { return Bit(5); } inline int MValue() const { return Bit(5); }
inline int DField() const { return Bit(22); } inline int DValue() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); } inline int RtValue() const { return Bits(15, 12); }
inline int PField() const { return Bit(24); } inline int PValue() const { return Bit(24); }
inline int UField() const { return Bit(23); } inline int UValue() const { return Bit(23); }
inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); } inline int Opc1Value() const { return (Bit(23) << 2) | Bits(21, 20); }
inline int Opc2Field() const { return Bits(19, 16); } inline int Opc2Value() const { return Bits(19, 16); }
inline int Opc3Field() const { return Bits(7, 6); } inline int Opc3Value() const { return Bits(7, 6); }
inline int SzField() const { return Bit(8); } inline int SzValue() const { return Bit(8); }
inline int VLField() const { return Bit(20); } inline int VLValue() const { return Bit(20); }
inline int VCField() const { return Bit(8); } inline int VCValue() const { return Bit(8); }
inline int VAField() const { return Bits(23, 21); } inline int VAValue() const { return Bits(23, 21); }
inline int VBField() const { return Bits(6, 5); } inline int VBValue() const { return Bits(6, 5); }
inline int VFPNRegCode(VFPRegPrecision pre) { inline int VFPNRegValue(VFPRegPrecision pre) {
return VFPGlueRegCode(pre, 16, 7); return VFPGlueRegValue(pre, 16, 7);
} }
inline int VFPMRegCode(VFPRegPrecision pre) { inline int VFPMRegValue(VFPRegPrecision pre) {
return VFPGlueRegCode(pre, 0, 5); return VFPGlueRegValue(pre, 0, 5);
} }
inline int VFPDRegCode(VFPRegPrecision pre) { inline int VFPDRegValue(VFPRegPrecision pre) {
return VFPGlueRegCode(pre, 12, 22); return VFPGlueRegValue(pre, 12, 22);
} }
// Fields used in Data processing instructions // Fields used in Data processing instructions
inline Opcode OpcodeField() const { inline int OpcodeValue() const {
return static_cast<Opcode>(Bits(24, 21)); return static_cast<Opcode>(Bits(24, 21));
} }
inline int SField() const { return Bit(20); } inline Opcode OpcodeField() const {
return static_cast<Opcode>(BitField(24, 21));
}
inline int SValue() const { return Bit(20); }
// with register // with register
inline int RmField() const { return Bits(3, 0); } inline int RmValue() const { return Bits(3, 0); }
inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); } inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline int RegShiftField() const { return Bit(4); } inline ShiftOp ShiftField() const {
inline int RsField() const { return Bits(11, 8); } return static_cast<ShiftOp>(BitField(6, 5));
inline int ShiftAmountField() const { return Bits(11, 7); } }
inline int RegShiftValue() const { return Bit(4); }
inline int RsValue() const { return Bits(11, 8); }
inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate // with immediate
inline int RotateField() const { return Bits(11, 8); } inline int RotateValue() const { return Bits(11, 8); }
inline int Immed8Field() const { return Bits(7, 0); } inline int Immed8Value() const { return Bits(7, 0); }
inline int Immed4Field() const { return Bits(19, 16); } inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtField() const { inline int ImmedMovwMovtValue() const {
return Immed4Field() << 12 | Offset12Field(); } return Immed4Value() << 12 | Offset12Value(); }
// Fields used in Load/Store instructions // Fields used in Load/Store instructions
inline int PUField() const { return Bits(24, 23); } inline int PUValue() const { return Bits(24, 23); }
inline int BField() const { return Bit(22); } inline int PUField() const { return BitField(24, 23); }
inline int WField() const { return Bit(21); } inline int BValue() const { return Bit(22); }
inline int LField() const { return Bit(20); } inline int WValue() const { return Bit(21); }
inline int LValue() const { return Bit(20); }
// with register uses same fields as Data processing instructions above // with register uses same fields as Data processing instructions above
// with immediate // with immediate
inline int Offset12Field() const { return Bits(11, 0); } inline int Offset12Value() const { return Bits(11, 0); }
// multiple // multiple
inline int RlistField() const { return Bits(15, 0); } inline int RlistValue() const { return Bits(15, 0); }
// extra loads and stores // extra loads and stores
inline int SignField() const { return Bit(6); } inline int SignValue() const { return Bit(6); }
inline int HField() const { return Bit(5); } inline int HValue() const { return Bit(5); }
inline int ImmedHField() const { return Bits(11, 8); } inline int ImmedHValue() const { return Bits(11, 8); }
inline int ImmedLField() const { return Bits(3, 0); } inline int ImmedLValue() const { return Bits(3, 0); }
// Fields used in Branch instructions // Fields used in Branch instructions
inline int LinkField() const { return Bit(24); } inline int LinkValue() const { return Bit(24); }
inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); } inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
// Fields used in Software interrupt instructions // Fields used in Software interrupt instructions
inline SoftwareInterruptCodes SvcField() const { inline SoftwareInterruptCodes SvcValue() const {
return static_cast<SoftwareInterruptCodes>(Bits(23, 0)); return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
} }
@ -354,42 +667,45 @@ class Instr {
// Test for a stop instruction. // Test for a stop instruction.
inline bool IsStop() const { inline bool IsStop() const {
return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop); return (TypeValue() == 7) && (Bit(24) == 1) && (SvcValue() >= kStopCode);
} }
// Special accessors that test for existence of a value. // Special accessors that test for existence of a value.
inline bool HasS() const { return SField() == 1; } inline bool HasS() const { return SValue() == 1; }
inline bool HasB() const { return BField() == 1; } inline bool HasB() const { return BValue() == 1; }
inline bool HasW() const { return WField() == 1; } inline bool HasW() const { return WValue() == 1; }
inline bool HasL() const { return LField() == 1; } inline bool HasL() const { return LValue() == 1; }
inline bool HasU() const { return UField() == 1; } inline bool HasU() const { return UValue() == 1; }
inline bool HasSign() const { return SignField() == 1; } inline bool HasSign() const { return SignValue() == 1; }
inline bool HasH() const { return HField() == 1; } inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkField() == 1; } inline bool HasLink() const { return LinkValue() == 1; }
// Decoding the double immediate in the vmov instruction. // Decoding the double immediate in the vmov instruction.
double DoubleImmedVmov() const; double DoubleImmedVmov() const;
// Instructions are read of out a code stream. The only way to get a // Instructions are read of out a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way // reference to an instruction is to convert a pointer. There is no way
// to allocate or create instances of class Instr. // to allocate or create instances of class Instruction.
// Use the At(pc) function to create references to Instr. // Use the At(pc) function to create references to Instruction.
static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); } static Instruction* At(byte* pc) {
return reinterpret_cast<Instruction*>(pc);
}
private: private:
// Join split register codes, depending on single or double precision. // Join split register codes, depending on single or double precision.
// four_bit is the position of the least-significant bit of the four // four_bit is the position of the least-significant bit of the four
// bit specifier. one_bit is the position of the additional single bit // bit specifier. one_bit is the position of the additional single bit
// specifier. // specifier.
inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) { inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
if (pre == kSinglePrecision) { if (pre == kSinglePrecision) {
return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit); return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
} }
return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit); return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
} }
// We need to prevent the creation of instances of class Instr. // We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
}; };
@ -428,6 +744,6 @@ class VFPRegisters {
}; };
} } // namespace assembler::arm } } // namespace v8::internal
#endif // V8_ARM_CONSTANTS_ARM_H_ #endif // V8_ARM_CONSTANTS_ARM_H_

2
deps/v8/src/arm/cpu-arm.cc

@ -56,7 +56,7 @@ void CPU::FlushICache(void* start, size_t size) {
// that the Icache was flushed. // that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues // None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots. // around whether or not to generate the code when building snapshots.
assembler::arm::Simulator::FlushICache(start, size); Simulator::FlushICache(start, size);
#else #else
// Ideally, we would call // Ideally, we would call
// syscall(__ARM_NR_cacheflush, start, // syscall(__ARM_NR_cacheflush, start,

11
deps/v8/src/arm/deoptimizer-arm.cc

@ -112,13 +112,16 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
} }
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo, void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) { Code* replacement_code) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) { void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
@ -367,7 +370,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Copy core registers into FrameDescription::registers_[kNumRegisters]. // Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters); ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) { for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kIntSize) + FrameDescription::registers_offset(); int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize)); __ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset)); __ str(r2, MemOperand(r1, offset));
} }
@ -456,7 +459,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Push the registers from the last output frame. // Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) { for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kIntSize) + FrameDescription::registers_offset(); int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset)); __ ldr(r6, MemOperand(r2, offset));
__ push(r6); __ push(r6);
} }

459
deps/v8/src/arm/disasm-arm.cc

@ -64,10 +64,8 @@
#include "platform.h" #include "platform.h"
namespace assembler { namespace v8 {
namespace arm { namespace internal {
namespace v8i = v8::internal;
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -78,7 +76,7 @@ namespace v8i = v8::internal;
class Decoder { class Decoder {
public: public:
Decoder(const disasm::NameConverter& converter, Decoder(const disasm::NameConverter& converter,
v8::internal::Vector<char> out_buffer) Vector<char> out_buffer)
: converter_(converter), : converter_(converter),
out_buffer_(out_buffer), out_buffer_(out_buffer),
out_buffer_pos_(0) { out_buffer_pos_(0) {
@ -100,45 +98,45 @@ class Decoder {
void PrintRegister(int reg); void PrintRegister(int reg);
void PrintSRegister(int reg); void PrintSRegister(int reg);
void PrintDRegister(int reg); void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format); int FormatVFPRegister(Instruction* instr, const char* format);
void PrintMovwMovt(Instr* instr); void PrintMovwMovt(Instruction* instr);
int FormatVFPinstruction(Instr* instr, const char* format); int FormatVFPinstruction(Instruction* instr, const char* format);
void PrintCondition(Instr* instr); void PrintCondition(Instruction* instr);
void PrintShiftRm(Instr* instr); void PrintShiftRm(Instruction* instr);
void PrintShiftImm(Instr* instr); void PrintShiftImm(Instruction* instr);
void PrintShiftSat(Instr* instr); void PrintShiftSat(Instruction* instr);
void PrintPU(Instr* instr); void PrintPU(Instruction* instr);
void PrintSoftwareInterrupt(SoftwareInterruptCodes svc); void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
// Handle formatting of instructions and their options. // Handle formatting of instructions and their options.
int FormatRegister(Instr* instr, const char* option); int FormatRegister(Instruction* instr, const char* option);
int FormatOption(Instr* instr, const char* option); int FormatOption(Instruction* instr, const char* option);
void Format(Instr* instr, const char* format); void Format(Instruction* instr, const char* format);
void Unknown(Instr* instr); void Unknown(Instruction* instr);
// Each of these functions decodes one particular instruction type, a 3-bit // Each of these functions decodes one particular instruction type, a 3-bit
// field in the instruction encoding. // field in the instruction encoding.
// Types 0 and 1 are combined as they are largely the same except for the way // Types 0 and 1 are combined as they are largely the same except for the way
// they interpret the shifter operand. // they interpret the shifter operand.
void DecodeType01(Instr* instr); void DecodeType01(Instruction* instr);
void DecodeType2(Instr* instr); void DecodeType2(Instruction* instr);
void DecodeType3(Instr* instr); void DecodeType3(Instruction* instr);
void DecodeType4(Instr* instr); void DecodeType4(Instruction* instr);
void DecodeType5(Instr* instr); void DecodeType5(Instruction* instr);
void DecodeType6(Instr* instr); void DecodeType6(Instruction* instr);
// Type 7 includes special Debugger instructions. // Type 7 includes special Debugger instructions.
int DecodeType7(Instr* instr); int DecodeType7(Instruction* instr);
// For VFP support. // For VFP support.
void DecodeTypeVFP(Instr* instr); void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instr* instr); void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr); void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instr* instr); void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr); void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr); void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
const disasm::NameConverter& converter_; const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_; Vector<char> out_buffer_;
int out_buffer_pos_; int out_buffer_pos_;
DISALLOW_COPY_AND_ASSIGN(Decoder); DISALLOW_COPY_AND_ASSIGN(Decoder);
@ -169,15 +167,15 @@ void Decoder::Print(const char* str) {
// These condition names are defined in a way to match the native disassembler // These condition names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>". // formatting. See for example the command "objdump -d <binary file>".
static const char* cond_names[max_condition] = { static const char* cond_names[kNumberOfConditions] = {
"eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" , "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
"hi", "ls", "ge", "lt", "gt", "le", "", "invalid", "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
}; };
// Print the condition guarding the instruction. // Print the condition guarding the instruction.
void Decoder::PrintCondition(Instr* instr) { void Decoder::PrintCondition(Instruction* instr) {
Print(cond_names[instr->ConditionField()]); Print(cond_names[instr->ConditionValue()]);
} }
@ -188,36 +186,37 @@ void Decoder::PrintRegister(int reg) {
// Print the VFP S register name according to the active name converter. // Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) { void Decoder::PrintSRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg, false)); Print(VFPRegisters::Name(reg, false));
} }
// Print the VFP D register name according to the active name converter. // Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) { void Decoder::PrintDRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg, true)); Print(VFPRegisters::Name(reg, true));
} }
// These shift names are defined in a way to match the native disassembler // These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>". // formatting. See for example the command "objdump -d <binary file>".
static const char* shift_names[max_shift] = { static const char* shift_names[kNumberOfShifts] = {
"lsl", "lsr", "asr", "ror" "lsl", "lsr", "asr", "ror"
}; };
// Print the register shift operands for the instruction. Generally used for // Print the register shift operands for the instruction. Generally used for
// data processing instructions. // data processing instructions.
void Decoder::PrintShiftRm(Instr* instr) { void Decoder::PrintShiftRm(Instruction* instr) {
Shift shift = instr->ShiftField(); ShiftOp shift = instr->ShiftField();
int shift_amount = instr->ShiftAmountField(); int shift_index = instr->ShiftValue();
int rm = instr->RmField(); int shift_amount = instr->ShiftAmountValue();
int rm = instr->RmValue();
PrintRegister(rm); PrintRegister(rm);
if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) { if ((instr->RegShiftValue() == 0) && (shift == LSL) && (shift_amount == 0)) {
// Special case for using rm only. // Special case for using rm only.
return; return;
} }
if (instr->RegShiftField() == 0) { if (instr->RegShiftValue() == 0) {
// by immediate // by immediate
if ((shift == ROR) && (shift_amount == 0)) { if ((shift == ROR) && (shift_amount == 0)) {
Print(", RRX"); Print(", RRX");
@ -225,14 +224,15 @@ void Decoder::PrintShiftRm(Instr* instr) {
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) { } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32; shift_amount = 32;
} }
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d", ", %s #%d",
shift_names[shift], shift_amount); shift_names[shift_index],
shift_amount);
} else { } else {
// by register // by register
int rs = instr->RsField(); int rs = instr->RsValue();
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s ", shift_names[shift]); ", %s ", shift_names[shift_index]);
PrintRegister(rs); PrintRegister(rs);
} }
} }
@ -240,43 +240,43 @@ void Decoder::PrintShiftRm(Instr* instr) {
// Print the immediate operand for the instruction. Generally used for data // Print the immediate operand for the instruction. Generally used for data
// processing instructions. // processing instructions.
void Decoder::PrintShiftImm(Instr* instr) { void Decoder::PrintShiftImm(Instruction* instr) {
int rotate = instr->RotateField() * 2; int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Field(); int immed8 = instr->Immed8Value();
int imm = (immed8 >> rotate) | (immed8 << (32 - rotate)); int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d", imm); "#%d", imm);
} }
// Print the optional shift and immediate used by saturating instructions. // Print the optional shift and immediate used by saturating instructions.
void Decoder::PrintShiftSat(Instr* instr) { void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7); int shift = instr->Bits(11, 7);
if (shift > 0) { if (shift > 0) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
", %s #%d", ", %s #%d",
shift_names[instr->Bit(6) * 2], shift_names[instr->Bit(6) * 2],
instr->Bits(11, 7)); instr->Bits(11, 7));
} }
} }
// Print PU formatting to reduce complexity of FormatOption. // Print PU formatting to reduce complexity of FormatOption.
void Decoder::PrintPU(Instr* instr) { void Decoder::PrintPU(Instruction* instr) {
switch (instr->PUField()) { switch (instr->PUField()) {
case 0: { case da_x: {
Print("da"); Print("da");
break; break;
} }
case 1: { case ia_x: {
Print("ia"); Print("ia");
break; break;
} }
case 2: { case db_x: {
Print("db"); Print("db");
break; break;
} }
case 3: { case ib_x: {
Print("ib"); Print("ib");
break; break;
} }
@ -292,22 +292,22 @@ void Decoder::PrintPU(Instr* instr) {
// the FormatOption method. // the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) { void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
switch (svc) { switch (svc) {
case call_rt_redirected: case kCallRtRedirected:
Print("call_rt_redirected"); Print("call rt redirected");
return; return;
case break_point: case kBreakpoint:
Print("break_point"); Print("breakpoint");
return; return;
default: default:
if (svc >= stop) { if (svc >= kStopCode) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d - 0x%x", "%d - 0x%x",
svc & kStopCodeMask, svc & kStopCodeMask,
svc & kStopCodeMask); svc & kStopCodeMask);
} else { } else {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", "%d",
svc); svc);
} }
return; return;
} }
@ -316,32 +316,32 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the // Handle all register based formatting in this function to reduce the
// complexity of FormatOption. // complexity of FormatOption.
int Decoder::FormatRegister(Instr* instr, const char* format) { int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r'); ASSERT(format[0] == 'r');
if (format[1] == 'n') { // 'rn: Rn register if (format[1] == 'n') { // 'rn: Rn register
int reg = instr->RnField(); int reg = instr->RnValue();
PrintRegister(reg); PrintRegister(reg);
return 2; return 2;
} else if (format[1] == 'd') { // 'rd: Rd register } else if (format[1] == 'd') { // 'rd: Rd register
int reg = instr->RdField(); int reg = instr->RdValue();
PrintRegister(reg); PrintRegister(reg);
return 2; return 2;
} else if (format[1] == 's') { // 'rs: Rs register } else if (format[1] == 's') { // 'rs: Rs register
int reg = instr->RsField(); int reg = instr->RsValue();
PrintRegister(reg); PrintRegister(reg);
return 2; return 2;
} else if (format[1] == 'm') { // 'rm: Rm register } else if (format[1] == 'm') { // 'rm: Rm register
int reg = instr->RmField(); int reg = instr->RmValue();
PrintRegister(reg); PrintRegister(reg);
return 2; return 2;
} else if (format[1] == 't') { // 'rt: Rt register } else if (format[1] == 't') { // 'rt: Rt register
int reg = instr->RtField(); int reg = instr->RtValue();
PrintRegister(reg); PrintRegister(reg);
return 2; return 2;
} else if (format[1] == 'l') { } else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions // 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist")); ASSERT(STRING_STARTS_WITH(format, "rlist"));
int rlist = instr->RlistField(); int rlist = instr->RlistValue();
int reg = 0; int reg = 0;
Print("{"); Print("{");
// Print register list in ascending order, by scanning the bit mask. // Print register list in ascending order, by scanning the bit mask.
@ -365,22 +365,22 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
// Handle all VFP register based formatting in this function to reduce the // Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption. // complexity of FormatOption.
int Decoder::FormatVFPRegister(Instr* instr, const char* format) { int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D')); ASSERT((format[0] == 'S') || (format[0] == 'D'));
if (format[1] == 'n') { if (format[1] == 'n') {
int reg = instr->VnField(); int reg = instr->VnValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField())); if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
if (format[0] == 'D') PrintDRegister(reg); if (format[0] == 'D') PrintDRegister(reg);
return 2; return 2;
} else if (format[1] == 'm') { } else if (format[1] == 'm') {
int reg = instr->VmField(); int reg = instr->VmValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField())); if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
if (format[0] == 'D') PrintDRegister(reg); if (format[0] == 'D') PrintDRegister(reg);
return 2; return 2;
} else if (format[1] == 'd') { } else if (format[1] == 'd') {
int reg = instr->VdField(); int reg = instr->VdValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField())); if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
if (format[0] == 'D') PrintDRegister(reg); if (format[0] == 'D') PrintDRegister(reg);
return 2; return 2;
} }
@@ -390,19 +390,19 @@ int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
 }
 
-int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
+int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
   Print(format);
   return 0;
 }
 
 // Print the movw or movt instruction.
-void Decoder::PrintMovwMovt(Instr* instr) {
-  int imm = instr->ImmedMovwMovtField();
-  int rd = instr->RdField();
+void Decoder::PrintMovwMovt(Instruction* instr) {
+  int imm = instr->ImmedMovwMovtValue();
+  int rd = instr->RdValue();
   PrintRegister(rd);
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                   ", #%d", imm);
 }
@@ -411,7 +411,7 @@ void Decoder::PrintMovwMovt(Instr* instr) {
 // character of the option string (the option escape has already been
 // consumed by the caller.)  FormatOption returns the number of
 // characters that were consumed from the formatting string.
-int Decoder::FormatOption(Instr* instr, const char* format) {
+int Decoder::FormatOption(Instruction* instr, const char* format) {
   switch (format[0]) {
     case 'a': {  // 'a: accumulate multiplies
       if (instr->Bit(21) == 0) {
@@ -434,8 +434,8 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
     }
     case 'd': {  // 'd: vmov double immediate.
       double d = instr->DoubleImmedVmov();
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "#%g", d);
       return 1;
     }
     case 'f': {  // 'f: bitfield instructions - v7 and above.
@@ -448,8 +448,8 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
         ASSERT(width > 0);
       }
       ASSERT((width + lsbit) <= 32);
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "#%d, #%d", lsbit, width);
       return 1;
     }
     case 'h': {  // 'h: halfword operation for extra loads and stores
@@ -469,9 +469,9 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
       ASSERT((lsb >= 0) && (lsb <= 31));
       ASSERT((width + lsb) <= 32);
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%d",
                                       instr->Bits(width + lsb - 1, lsb));
       return 8;
     }
     case 'l': {  // 'l: branch and link
@@ -505,31 +505,31 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
       ASSERT(STRING_STARTS_WITH(format, "msg"));
       byte* str =
           reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%s", converter_.NameInCode(str));
       return 3;
     }
     case 'o': {
       if ((format[3] == '1') && (format[4] == '2')) {
         // 'off12: 12-bit offset for load and store instructions
         ASSERT(STRING_STARTS_WITH(format, "off12"));
-        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                        "%d", instr->Offset12Field());
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%d", instr->Offset12Value());
         return 5;
       } else if (format[3] == '0') {
         // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
         ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
-        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+        out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                         "%d",
                                         (instr->Bits(19, 8) << 4) +
                                         instr->Bits(3, 0));
         return 15;
       }
       // 'off8: 8-bit offset for extra load and store instructions
       ASSERT(STRING_STARTS_WITH(format, "off8"));
-      int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
-      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+      int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "%d", offs8);
       return 4;
     }
     case 'p': {  // 'pu: P and U bits for load and store instructions
@@ -544,10 +544,10 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
       if (format[1] == 'h') {  // 'shift_op or 'shift_rm or 'shift_sat.
         if (format[6] == 'o') {  // 'shift_op
           ASSERT(STRING_STARTS_WITH(format, "shift_op"));
-          if (instr->TypeField() == 0) {
+          if (instr->TypeValue() == 0) {
             PrintShiftRm(instr);
           } else {
-            ASSERT(instr->TypeField() == 1);
+            ASSERT(instr->TypeValue() == 1);
             PrintShiftImm(instr);
           }
           return 8;
@@ -562,7 +562,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
         }
       } else if (format[1] == 'v') {  // 'svc
         ASSERT(STRING_STARTS_WITH(format, "svc"));
-        PrintSoftwareInterrupt(instr->SvcField());
+        PrintSoftwareInterrupt(instr->SvcValue());
         return 3;
       } else if (format[1] == 'i') {  // 'sign: signed extra loads and stores
         ASSERT(STRING_STARTS_WITH(format, "sign"));
@@ -579,12 +579,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
     }
     case 't': {  // 'target: target of branch instructions
       ASSERT(STRING_STARTS_WITH(format, "target"));
-      int off = (instr->SImmed24Field() << 2) + 8;
-      out_buffer_pos_ += v8i::OS::SNPrintF(
-          out_buffer_ + out_buffer_pos_,
-          "%+d -> %s",
-          off,
-          converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+      int off = (instr->SImmed24Value() << 2) + 8;
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "%+d -> %s",
+                                      off,
+                                      converter_.NameOfAddress(
+                                          reinterpret_cast<byte*>(instr) + off));
       return 6;
     }
     case 'u': {  // 'u: signed or unsigned multiplies
@@ -633,7 +633,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
 // Format takes a formatting string for a whole instruction and prints it into
 // the output buffer. All escaped options are handed to FormatOption to be
 // parsed further.
-void Decoder::Format(Instr* instr, const char* format) {
+void Decoder::Format(Instruction* instr, const char* format) {
   char cur = *format++;
   while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
     if (cur == '\'') {  // Single quote is used as the formatting escape.
@@ -649,13 +649,13 @@ void Decoder::Format(Instr* instr, const char* format) {
 // For currently unimplemented decodings the disassembler calls Unknown(instr)
 // which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instr* instr) {
+void Decoder::Unknown(Instruction* instr) {
   Format(instr, "unknown");
 }
 
-void Decoder::DecodeType01(Instr* instr) {
-  int type = instr->TypeField();
+void Decoder::DecodeType01(Instruction* instr) {
+  int type = instr->TypeValue();
   if ((type == 0) && instr->IsSpecialType0()) {
     // multiply instruction or extra loads and stores
     if (instr->Bits(7, 4) == 9) {
@@ -689,7 +689,7 @@ void Decoder::DecodeType01(Instr* instr) {
   } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
     // ldrd, strd
     switch (instr->PUField()) {
-      case 0: {
+      case da_x: {
         if (instr->Bit(22) == 0) {
           Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
         } else {
@@ -697,7 +697,7 @@ void Decoder::DecodeType01(Instr* instr) {
         }
         break;
       }
-      case 1: {
+      case ia_x: {
         if (instr->Bit(22) == 0) {
           Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
         } else {
@@ -705,7 +705,7 @@ void Decoder::DecodeType01(Instr* instr) {
         }
         break;
       }
-      case 2: {
+      case db_x: {
         if (instr->Bit(22) == 0) {
           Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
         } else {
@@ -713,7 +713,7 @@ void Decoder::DecodeType01(Instr* instr) {
         }
         break;
       }
-      case 3: {
+      case ib_x: {
         if (instr->Bit(22) == 0) {
           Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
         } else {
@@ -730,7 +730,7 @@ void Decoder::DecodeType01(Instr* instr) {
   } else {
     // extra load/store instructions
     switch (instr->PUField()) {
-      case 0: {
+      case da_x: {
        if (instr->Bit(22) == 0) {
          Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
        } else {
@@ -738,7 +738,7 @@ void Decoder::DecodeType01(Instr* instr) {
        }
        break;
       }
-      case 1: {
+      case ia_x: {
        if (instr->Bit(22) == 0) {
          Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
        } else {
@@ -746,7 +746,7 @@ void Decoder::DecodeType01(Instr* instr) {
        }
        break;
       }
-      case 2: {
+      case db_x: {
        if (instr->Bit(22) == 0) {
          Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
        } else {
@@ -754,7 +754,7 @@ void Decoder::DecodeType01(Instr* instr) {
        }
        break;
       }
-      case 3: {
+      case ib_x: {
        if (instr->Bit(22) == 0) {
          Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
        } else {
@@ -772,7 +772,7 @@ void Decoder::DecodeType01(Instr* instr) {
     }
   } else if ((type == 0) && instr->IsMiscType0()) {
     if (instr->Bits(22, 21) == 1) {
-      switch (instr->Bits(7, 4)) {
+      switch (instr->BitField(7, 4)) {
         case BX:
           Format(instr, "bx'cond 'rm");
           break;
@@ -787,7 +787,7 @@ void Decoder::DecodeType01(Instr* instr) {
           break;
       }
     } else if (instr->Bits(22, 21) == 3) {
-      switch (instr->Bits(7, 4)) {
+      switch (instr->BitField(7, 4)) {
        case CLZ:
          Format(instr, "clz'cond 'rd, 'rm");
          break;
@@ -894,27 +894,27 @@ void Decoder::DecodeType01(Instr* instr) {
 }
 
-void Decoder::DecodeType2(Instr* instr) {
+void Decoder::DecodeType2(Instruction* instr) {
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       if (instr->HasW()) {
         Unknown(instr);  // not used in V8
       }
       Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
       break;
     }
-    case 1: {
+    case ia_x: {
       if (instr->HasW()) {
         Unknown(instr);  // not used in V8
       }
       Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
       break;
     }
-    case 2: {
+    case db_x: {
       Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
       break;
     }
-    case 3: {
+    case ib_x: {
      Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
      break;
     }
@@ -927,14 +927,14 @@ void Decoder::DecodeType2(Instr* instr) {
 }
 
-void Decoder::DecodeType3(Instr* instr) {
+void Decoder::DecodeType3(Instruction* instr) {
   switch (instr->PUField()) {
-    case 0: {
+    case da_x: {
       ASSERT(!instr->HasW());
       Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
       break;
     }
-    case 1: {
+    case ia_x: {
       if (instr->HasW()) {
         ASSERT(instr->Bits(5, 4) == 0x1);
         if (instr->Bit(22) == 0x1) {
@@ -947,11 +947,11 @@ void Decoder::DecodeType3(Instr* instr) {
       }
       break;
     }
-    case 2: {
+    case db_x: {
       Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       break;
     }
-    case 3: {
+    case ib_x: {
       if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
         uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
         uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
@@ -969,7 +969,7 @@ void Decoder::DecodeType3(Instr* instr) {
         uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
         uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
         if (msbit >= lsbit) {
-          if (instr->RmField() == 15) {
+          if (instr->RmValue() == 15) {
             Format(instr, "bfc'cond 'rd, 'f");
           } else {
             Format(instr, "bfi'cond 'rd, 'rm, 'f");
@@ -991,7 +991,7 @@ void Decoder::DecodeType3(Instr* instr) {
 }
 
-void Decoder::DecodeType4(Instr* instr) {
+void Decoder::DecodeType4(Instruction* instr) {
   ASSERT(instr->Bit(22) == 0);  // Privileged mode currently not supported.
   if (instr->HasL()) {
     Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
@@ -1001,41 +1001,43 @@ void Decoder::DecodeType4(Instr* instr) {
 }
 
-void Decoder::DecodeType5(Instr* instr) {
+void Decoder::DecodeType5(Instruction* instr) {
   Format(instr, "b'l'cond 'target");
 }
 
-void Decoder::DecodeType6(Instr* instr) {
+void Decoder::DecodeType6(Instruction* instr) {
   DecodeType6CoprocessorIns(instr);
 }
 
-int Decoder::DecodeType7(Instr* instr) {
+int Decoder::DecodeType7(Instruction* instr) {
   if (instr->Bit(24) == 1) {
-    if (instr->SvcField() >= stop) {
+    if (instr->SvcValue() >= kStopCode) {
       Format(instr, "stop'cond 'svc");
       // Also print the stop message. Its address is encoded
       // in the following 4 bytes.
-      out_buffer_pos_ +=
-          v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                            "\n  %p  %08x       stop message: %s",
-                            reinterpret_cast<int32_t*>(instr + Instr::kInstrSize),
-                            *reinterpret_cast<char**>(instr + Instr::kInstrSize),
-                            *reinterpret_cast<char**>(instr + Instr::kInstrSize));
-      // We have decoded 2 * Instr::kInstrSize bytes.
-      return 2 * Instr::kInstrSize;
+      out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "\n  %p  %08x       stop message: %s",
+                                      reinterpret_cast<int32_t*>(instr
+                                                     + Instruction::kInstrSize),
+                                      *reinterpret_cast<char**>(instr
+                                                    + Instruction::kInstrSize),
+                                      *reinterpret_cast<char**>(instr
+                                                    + Instruction::kInstrSize));
+      // We have decoded 2 * Instruction::kInstrSize bytes.
+      return 2 * Instruction::kInstrSize;
     } else {
       Format(instr, "svc'cond 'svc");
     }
   } else {
     DecodeTypeVFP(instr);
   }
-  return Instr::kInstrSize;
+  return Instruction::kInstrSize;
 }
 
 
-// void Decoder::DecodeTypeVFP(Instr* instr)
+// void Decoder::DecodeTypeVFP(Instruction* instr)
 // vmov: Sn = Rt
 // vmov: Rt = Sn
 // vcvt: Dd = Sm
@@ -1048,34 +1050,34 @@ int Decoder::DecodeType7(Instr* instr) {
 // vmrs
 // vmsr
 // Dd = vsqrt(Dm)
-void Decoder::DecodeTypeVFP(Instr* instr) {
-  ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
+void Decoder::DecodeTypeVFP(Instruction* instr) {
+  ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
   ASSERT(instr->Bits(11, 9) == 0x5);
 
   if (instr->Bit(4) == 0) {
-    if (instr->Opc1Field() == 0x7) {
+    if (instr->Opc1Value() == 0x7) {
       // Other data processing instructions
-      if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+      if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
         // vmov register to register.
-        if (instr->SzField() == 0x1) {
+        if (instr->SzValue() == 0x1) {
          Format(instr, "vmov.f64'cond 'Dd, 'Dm");
        } else {
          Format(instr, "vmov.f32'cond 'Sd, 'Sm");
        }
-      } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+      } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
        DecodeVCVTBetweenDoubleAndSingle(instr);
-      } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
+      } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
        DecodeVCVTBetweenFloatingPointAndInteger(instr);
-      } else if (((instr->Opc2Field() >> 1) == 0x6) &&
-                 (instr->Opc3Field() & 0x1)) {
+      } else if (((instr->Opc2Value() >> 1) == 0x6) &&
+                 (instr->Opc3Value() & 0x1)) {
        DecodeVCVTBetweenFloatingPointAndInteger(instr);
-      } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
-                 (instr->Opc3Field() & 0x1)) {
+      } else if (((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+                 (instr->Opc3Value() & 0x1)) {
        DecodeVCMP(instr);
-      } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) {
+      } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
        Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
-      } else if (instr->Opc3Field() == 0x0) {
-        if (instr->SzField() == 0x1) {
+      } else if (instr->Opc3Value() == 0x0) {
+        if (instr->SzValue() == 0x1) {
          Format(instr, "vmov.f64'cond 'Dd, 'd");
        } else {
          Unknown(instr);  // Not used by V8.
@@ -1083,9 +1085,9 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
       } else {
         Unknown(instr);  // Not used by V8.
       }
-    } else if (instr->Opc1Field() == 0x3) {
-      if (instr->SzField() == 0x1) {
-        if (instr->Opc3Field() & 0x1) {
+    } else if (instr->Opc1Value() == 0x3) {
+      if (instr->SzValue() == 0x1) {
+        if (instr->Opc3Value() & 0x1) {
           Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
         } else {
           Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1093,14 +1095,14 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
       } else {
         Unknown(instr);  // Not used by V8.
       }
-    } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) {
-      if (instr->SzField() == 0x1) {
+    } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
+      if (instr->SzValue() == 0x1) {
        Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
       } else {
        Unknown(instr);  // Not used by V8.
       }
-    } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) {
-      if (instr->SzField() == 0x1) {
+    } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
+      if (instr->SzValue() == 0x1) {
        Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
       } else {
        Unknown(instr);  // Not used by V8.
@@ -1109,13 +1111,13 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
       Unknown(instr);  // Not used by V8.
     }
   } else {
-    if ((instr->VCField() == 0x0) &&
-        (instr->VAField() == 0x0)) {
+    if ((instr->VCValue() == 0x0) &&
+        (instr->VAValue() == 0x0)) {
       DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
-    } else if ((instr->VCField() == 0x0) &&
-               (instr->VAField() == 0x7) &&
+    } else if ((instr->VCValue() == 0x0) &&
+               (instr->VAValue() == 0x7) &&
                (instr->Bits(19, 16) == 0x1)) {
-      if (instr->VLField() == 0) {
+      if (instr->VLValue() == 0) {
         if (instr->Bits(15, 12) == 0xF) {
           Format(instr, "vmsr'cond FPSCR, APSR");
         } else {
@@ -1133,11 +1135,12 @@ void Decoder::DecodeTypeVFP(Instr* instr) {
 }
 
-void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
-  ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) &&
-         (instr->VAField() == 0x0));
+void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
+    Instruction* instr) {
+  ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+         (instr->VAValue() == 0x0));
 
-  bool to_arm_register = (instr->VLField() == 0x1);
+  bool to_arm_register = (instr->VLValue() == 0x1);
   if (to_arm_register) {
     Format(instr, "vmov'cond 'rt, 'Sn");
@@ -1147,19 +1150,19 @@ void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
 }
 
-void Decoder::DecodeVCMP(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
-         (instr->Opc3Field() & 0x1));
+void Decoder::DecodeVCMP(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+         (instr->Opc3Value() & 0x1));
 
   // Comparison.
-  bool dp_operation = (instr->SzField() == 1);
+  bool dp_operation = (instr->SzValue() == 1);
   bool raise_exception_for_qnan = (instr->Bit(7) == 0x1);
 
   if (dp_operation && !raise_exception_for_qnan) {
-    if (instr->Opc2Field() == 0x4) {
+    if (instr->Opc2Value() == 0x4) {
       Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
-    } else if (instr->Opc2Field() == 0x5) {
+    } else if (instr->Opc2Value() == 0x5) {
       Format(instr, "vcmp.f64'cond 'Dd, #0.0");
     } else {
       Unknown(instr);  // invalid
@@ -1170,11 +1173,11 @@ void Decoder::DecodeVCMP(Instr* instr) {
 }
 
-void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));
+void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
 
-  bool double_to_single = (instr->SzField() == 1);
+  bool double_to_single = (instr->SzValue() == 1);
 
   if (double_to_single) {
     Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
@@ -1184,13 +1187,13 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
 }
 
-void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
-  ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) ||
-         (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));
+void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+         (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   bool to_integer = (instr->Bit(18) == 1);
-  bool dp_operation = (instr->SzField() == 1);
+  bool dp_operation = (instr->SzValue() == 1);
   if (to_integer) {
     bool unsigned_integer = (instr->Bit(16) == 0);
@@ -1232,11 +1235,11 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
 // <Rt, Rt2> = vmov(Dm)
 // Ddst = MEM(Rbase + 4*offset).
 // MEM(Rbase + 4*offset) = Dsrc.
-void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
-  ASSERT((instr->TypeField() == 6));
+void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
+  ASSERT(instr->TypeValue() == 6);
 
-  if (instr->CoprocessorField() == 0xA) {
-    switch (instr->OpcodeField()) {
+  if (instr->CoprocessorValue() == 0xA) {
+    switch (instr->OpcodeValue()) {
       case 0x8:
       case 0xA:
         if (instr->HasL()) {
@@ -1257,8 +1260,8 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
         Unknown(instr);  // Not used by V8.
         break;
     }
-  } else if (instr->CoprocessorField() == 0xB) {
-    switch (instr->OpcodeField()) {
+  } else if (instr->CoprocessorValue() == 0xB) {
+    switch (instr->OpcodeValue()) {
       case 0x2:
         // Load and store double to two GP registers
         if (instr->Bits(7, 4) != 0x1) {
@@ -1295,16 +1298,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
 // Disassemble the instruction at *instr_ptr into the output buffer.
 int Decoder::InstructionDecode(byte* instr_ptr) {
-  Instr* instr = Instr::At(instr_ptr);
+  Instruction* instr = Instruction::At(instr_ptr);
   // Print raw instruction bytes.
-  out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+  out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                   "%08x       ",
                                   instr->InstructionBits());
-  if (instr->ConditionField() == special_condition) {
+  if (instr->ConditionField() == kSpecialCondition) {
     UNIMPLEMENTED();
-    return Instr::kInstrSize;
+    return Instruction::kInstrSize;
   }
-  switch (instr->TypeField()) {
+  switch (instr->TypeValue()) {
     case 0:
     case 1: {
       DecodeType01(instr);
@@ -1339,11 +1342,11 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
       break;
     }
   }
-  return Instr::kInstrSize;
+  return Instruction::kInstrSize;
 }
 
 
-} }  // namespace assembler::arm
+} }  // namespace v8::internal
@@ -1351,8 +1354,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
 namespace disasm {
 
-namespace v8i = v8::internal;
-
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
   static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
@@ -1367,7 +1368,7 @@ const char* NameConverter::NameOfConstant(byte* addr) const {
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return assembler::arm::Registers::Name(reg);
+  return v8::internal::Registers::Name(reg);
 }
@@ -1401,7 +1402,7 @@ Disassembler::~Disassembler() {}
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
                                     byte* instruction) {
-  assembler::arm::Decoder d(converter_, buffer);
+  v8::internal::Decoder d(converter_, buffer);
   return d.InstructionDecode(instruction);
 }
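
Most of this file is a mechanical rename: the decoder's operand type moves from Instr to Instruction, accessors move from Field() to Value() (with BitField() kept for in-place bits), the v8i:: alias disappears with the move into v8::internal, and the numeric PUField() cases become the named addressing-mode constants da_x, ia_x, db_x, and ib_x from constants-arm.h. A minimal sketch of the accessor split, with a hypothetical Insn class standing in for the real Instruction (the helpers mirror how such bit-field extraction is typically written, not V8's exact code):

    #include <cstdint>
    #include <cstdio>

    class Insn {
     public:
      explicit Insn(uint32_t bits) : bits_(bits) {}
      // xxxValue() style: the field shifted down to bit 0.
      int Bits(int hi, int lo) const {
        return (bits_ >> lo) & ((2 << (hi - lo)) - 1);
      }
      // BitField() style: the same bits left in place, ready to compare
      // against masked constants such as BX or CLZ.
      uint32_t BitField(int hi, int lo) const {
        return bits_ & (((2U << (hi - lo)) - 1) << lo);
      }
      int RdValue() const { return Bits(15, 12); }
      uint32_t PUField() const { return BitField(24, 23); }
     private:
      uint32_t bits_;
    };

    int main() {
      Insn ldr(0xe59f1008);  // ldr r1, [pc, #8]
      std::printf("rd=%d pu=%#x\n", ldr.RdValue(),
                  static_cast<unsigned>(ldr.PUField()));
    }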

deps/v8/src/arm/frames-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,20 +30,13 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "frames-inl.h"
-#include "arm/assembler-arm-inl.h"
-
 
 namespace v8 {
 namespace internal {
 
 
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
-  Address sp = fp + ExitFrameConstants::kSPOffset;
-  if (marker == NULL) {
-    sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
-  }
-  return sp;
+  return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
 }
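
The rewrite drops the marker-word protocol: an exit frame now always saves the caller's SP in a fixed slot, so recovering it is a single load instead of a conditional recomputation over the saved double registers. A sketch of the simplified lookup (the Address alias and the fake frame in main are illustrative stand-ins for V8's Memory::Address_at):

    #include <cassert>
    #include <cstdint>

    using Address = uint8_t*;
    constexpr int kPointerSize = sizeof(Address);
    constexpr int kSPOffset = -1 * kPointerSize;

    Address ComputeStackPointer(Address fp) {
      // One load from the fixed slot below fp; no marker check needed.
      return *reinterpret_cast<Address*>(fp + kSPOffset);
    }

    int main() {
      Address frame[4] = {};  // fake stack memory
      Address fp = reinterpret_cast<Address>(&frame[2]);
      frame[1] = reinterpret_cast<Address>(&frame[3]);  // saved SP one word below fp
      assert(ComputeStackPointer(fp) == reinterpret_cast<Address>(&frame[3]));
    }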

deps/v8/src/arm/frames-arm.h

@@ -107,21 +107,17 @@ class EntryFrameConstants : public AllStatic {
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kCodeOffset = -1 * kPointerSize;
+  static const int kCodeOffset = -2 * kPointerSize;
   static const int kSPOffset = -1 * kPointerSize;
 
-  // TODO(regis): Use a patched sp value on the stack instead.
-  // A marker of 0 indicates that double registers are saved.
-  static const int kMarkerOffset = -2 * kPointerSize;
-
   // The caller fields are below the frame pointer on the stack.
-  static const int kCallerFPOffset = +0 * kPointerSize;
-  // The calling JS function is between FP and PC.
-  static const int kCallerPCOffset = +2 * kPointerSize;
+  static const int kCallerFPOffset = 0 * kPointerSize;
+  // The calling JS function is below FP.
+  static const int kCallerPCOffset = 1 * kPointerSize;
 
   // FP-relative displacement of the caller's SP.  It points just
   // below the saved PC.
-  static const int kCallerSPDisplacement = +3 * kPointerSize;
+  static const int kCallerSPDisplacement = 2 * kPointerSize;
 };
@@ -131,8 +127,8 @@ class StandardFrameConstants : public AllStatic {
   static const int kMarkerOffset = -2 * kPointerSize;
   static const int kContextOffset = -1 * kPointerSize;
   static const int kCallerFPOffset = 0 * kPointerSize;
-  static const int kCallerPCOffset = +1 * kPointerSize;
-  static const int kCallerSPOffset = +2 * kPointerSize;
+  static const int kCallerPCOffset = 1 * kPointerSize;
+  static const int kCallerSPOffset = 2 * kPointerSize;
 };
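
Taken together, the new constants describe a tighter fp-relative exit-frame layout: the marker slot is gone, the code object takes its place at fp - 2 words, and the caller's PC moves one word closer to fp. A sketch that just prints the slots (the labels are interpretations of the constant names above, with kPointerSize fixed at 4 as on 32-bit ARM):

    #include <cstdio>

    constexpr int kPointerSize = 4;
    constexpr int kCodeOffset = -2 * kPointerSize;
    constexpr int kSPOffset = -1 * kPointerSize;
    constexpr int kCallerFPOffset = 0 * kPointerSize;
    constexpr int kCallerPCOffset = 1 * kPointerSize;
    constexpr int kCallerSPDisplacement = 2 * kPointerSize;

    int main() {
      std::printf("fp%+d  code object\n", kCodeOffset);
      std::printf("fp%+d  saved SP\n", kSPOffset);
      std::printf("fp%+d  caller FP\n", kCallerFPOffset);
      std::printf("fp%+d  caller PC\n", kCallerPCOffset);
      std::printf("fp%+d  caller SP points here\n", kCallerSPDisplacement);
    }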

deps/v8/src/arm/full-codegen-arm.cc

@@ -92,7 +92,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   bool function_in_register = true;
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots();
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is in r1.
@@ -517,16 +517,16 @@ void FullCodeGenerator::DoTest(Label* if_true,
 }
 
-void FullCodeGenerator::Split(Condition cc,
+void FullCodeGenerator::Split(Condition cond,
                               Label* if_true,
                               Label* if_false,
                               Label* fall_through) {
   if (if_false == fall_through) {
-    __ b(cc, if_true);
+    __ b(cond, if_true);
   } else if (if_true == fall_through) {
-    __ b(NegateCondition(cc), if_false);
+    __ b(NegateCondition(cond), if_false);
   } else {
-    __ b(cc, if_true);
+    __ b(cond, if_true);
     __ b(if_false);
   }
 }
@@ -734,6 +734,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
+    clause->body_target()->entry_label()->Unuse();
+
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
       default_clause = clause;
@@ -817,7 +819,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Convert the object to a JS object.
   Label convert, done_convert;
-  __ BranchOnSmi(r0, &convert);
+  __ JumpIfSmi(r0, &convert);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   __ b(hs, &done_convert);
   __ bind(&convert);
@@ -1548,8 +1550,13 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  GenericBinaryOpStub stub(op, mode, r1, r0);
-  __ CallStub(&stub);
+  if (op == Token::ADD || op == Token::SUB || op == Token::MUL) {
+    TypeRecordingBinaryOpStub stub(op, mode);
+    __ CallStub(&stub);
+  } else {
+    GenericBinaryOpStub stub(op, mode, r1, r0);
+    __ CallStub(&stub);
+  }
   context()->Plug(r0);
 }
@@ -2130,7 +2137,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ LoadRoot(ip, Heap::kNullValueRootIndex);
   __ cmp(r0, ip);
   __ b(eq, if_true);
@@ -2162,7 +2169,7 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
@@ -2183,7 +2190,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -2229,7 +2236,7 @@ void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
@@ -2250,7 +2257,7 @@ void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
@@ -2271,7 +2278,7 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_false);
+  __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
@@ -2378,7 +2385,7 @@ void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
   VisitForAccumulatorValue(args->at(0));
 
   // If the object is a smi, we return null.
-  __ BranchOnSmi(r0, &null);
+  __ JumpIfSmi(r0, &null);
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
@@ -2529,7 +2536,7 @@ void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   Label done;
   // If the object is a smi return the object.
-  __ BranchOnSmi(r0, &done);
+  __ JumpIfSmi(r0, &done);
   // If the object is not a value type, return the object.
   __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
   __ b(ne, &done);
@@ -2559,7 +2566,7 @@ void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   Label done;
   // If the object is a smi, return the value.
-  __ BranchOnSmi(r1, &done);
+  __ JumpIfSmi(r1, &done);
 
   // If the object is not a value type, return the value.
   __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
@@ -2992,22 +2999,20 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         if (prop != NULL) {
           VisitForStackValue(prop->obj());
           VisitForStackValue(prop->key());
+          __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
         } else if (var->is_global()) {
           __ ldr(r1, GlobalObjectOperand());
           __ mov(r0, Operand(var->name()));
           __ Push(r1, r0);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
         } else {
-          // Non-global variable.  Call the runtime to look up the context
-          // where the variable was introduced.
+          // Non-global variable.  Call the runtime to delete from the
+          // context where the variable was introduced.
           __ push(context_register());
           __ mov(r2, Operand(var->name()));
           __ push(r2);
-          __ CallRuntime(Runtime::kLookupContext, 2);
-          __ push(r0);
-          __ mov(r2, Operand(var->name()));
-          __ push(r2);
+          __ CallRuntime(Runtime::kDeleteContextSlot, 2);
         }
-        __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
         context()->Plug(r0);
       }
       break;
@@ -3084,7 +3089,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
       bool inline_smi_code = ShouldInlineSmiCase(expr->op());
       if (inline_smi_code) {
         Label call_stub;
-        __ BranchOnNotSmi(r0, &call_stub);
+        __ JumpIfNotSmi(r0, &call_stub);
         __ mvn(r0, Operand(r0));
         // Bit-clear inverted smi-tag.
         __ bic(r0, r0, Operand(kSmiTagMask));
@@ -3171,7 +3176,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
-  __ BranchOnSmi(r0, &no_conversion);
+  __ JumpIfSmi(r0, &no_conversion);
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
   __ bind(&no_conversion);
@@ -3205,7 +3210,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
         __ b(vs, &stub_call);
         // We could eliminate this smi check if we split the code at
         // the first smi check before calling ToNumber.
-        __ BranchOnSmi(r0, &done);
+        __ JumpIfSmi(r0, &done);
         __ bind(&stub_call);
         // Call stub. Undo operation first.
         __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3458,34 +3463,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
     default: {
       VisitForAccumulatorValue(expr->right());
-      Condition cc = eq;
+      Condition cond = eq;
       bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
         case Token::EQ:
-          cc = eq;
+          cond = eq;
           __ pop(r1);
           break;
         case Token::LT:
-          cc = lt;
+          cond = lt;
           __ pop(r1);
           break;
        case Token::GT:
          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = lt;
+          cond = lt;
          __ mov(r1, result_register());
          __ pop(r0);
          break;
        case Token::LTE:
          // Reverse left and right sides to obtain ECMA-262 conversion order.
-          cc = ge;
+          cond = ge;
          __ mov(r1, result_register());
          __ pop(r0);
          break;
        case Token::GTE:
-          cc = ge;
+          cond = ge;
          __ pop(r1);
          break;
        case Token::IN:
@@ -3498,19 +3503,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
       if (inline_smi_code) {
         Label slow_case;
         __ orr(r2, r0, Operand(r1));
-        __ BranchOnNotSmi(r2, &slow_case);
+        __ JumpIfNotSmi(r2, &slow_case);
         __ cmp(r1, r0);
-        Split(cc, if_true, if_false, NULL);
+        Split(cond, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
       CompareFlags flags = inline_smi_code
           ? NO_SMI_COMPARE_IN_STUB
          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags, r1, r0);
+      CompareStub stub(cond, strict, flags, r1, r0);
       __ CallStub(&stub);
 
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0, RelocInfo::NONE));
-      Split(cc, if_true, if_false, fall_through);
+      Split(cond, if_true, if_false, fall_through);
     }
   }
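
The cc-to-cond rename aside, note that the comparison lowering only ever materializes eq, lt, and ge: for > and <= it swaps the operand registers so ECMA-262's left-to-right conversion order is preserved. A compact model of that mapping (enum values and names are illustrative, not V8's):

    #include <cassert>

    enum Condition { eq, lt, ge, kNoCondition };
    enum class Token { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    struct Compare {
      Condition cond;
      bool swap_operands;  // evaluate right into the r1 slot, left into r0
    };

    // a > b is compiled as b < a, and a <= b as b >= a, so only the
    // eq, lt, and ge condition codes are ever needed.
    Compare Select(Token op) {
      switch (op) {
        case Token::EQ:
        case Token::EQ_STRICT: return {eq, false};
        case Token::LT:        return {lt, false};
        case Token::GT:        return {lt, true};
        case Token::LTE:       return {ge, true};
        case Token::GTE:       return {ge, false};
      }
      return {kNoCondition, false};
    }

    int main() {
      assert(Select(Token::GT).cond == lt && Select(Token::GT).swap_operands);
      assert(Select(Token::GTE).cond == ge && !Select(Token::GTE).swap_operands);
    }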

deps/v8/src/arm/ic-arm.cc

@@ -95,13 +95,13 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
   __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
   __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
                      (1 << Map::kHasNamedInterceptor)));
-  __ b(nz, miss);
+  __ b(ne, miss);
 
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
   __ cmp(t1, ip);
-  __ b(nz, miss);
+  __ b(ne, miss);
 }
@@ -379,7 +379,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
 }
 
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -388,7 +388,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
+  StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
+                                         support_wrappers);
   // Cache miss: Jump to runtime.
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -419,14 +420,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                            int interceptor_bit,
                                            Label* slow) {
   // Check that the object isn't a smi.
-  __ BranchOnSmi(receiver, slow);
+  __ JumpIfSmi(receiver, slow);
   // Get the map of the receiver.
   __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check bit field.
   __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
   __ tst(scratch,
          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ b(nz, slow);
+  __ b(ne, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
   // we enter the runtime system to make sure that indexing into string
@@ -749,7 +750,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   Label index_smi, index_string;
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(r2, &check_string);
+  __ JumpIfNotSmi(r2, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
@@ -1165,7 +1166,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   Register receiver = r1;
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(key, &check_string);
+  __ JumpIfNotSmi(key, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
@@ -1346,7 +1347,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   Label slow;
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(r1, &slow);
+  __ JumpIfSmi(r1, &slow);
 
   // Check that the key is an array index, that is Uint32.
   __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
@@ -1470,7 +1471,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ b(ne, &slow);
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
-  __ BranchOnNotSmi(value, &slow);
+  __ JumpIfNotSmi(value, &slow);
   __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
   __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
   __ cmp(r4, Operand(ip));
@@ -1589,7 +1590,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   Register scratch = r3;
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the object is a JS array.
   __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -1603,7 +1604,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   __ b(ne, &miss);
 
   // Check that value is a smi.
-  __ BranchOnNotSmi(value, &miss);
+  __ JumpIfNotSmi(value, &miss);
 
   // Prepare tail call to StoreIC_ArrayLength.
   __ Push(receiver, value);
@@ -1673,7 +1674,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
       return ge;
     default:
       UNREACHABLE();
-      return no_condition;
+      return kNoCondition;
  }
 }
@@ -1704,7 +1705,7 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 void PatchInlinedSmiCode(Address address) {
-  UNIMPLEMENTED();
+  // Currently there is no smi inlining in the ARM full code generator.
 }
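
BranchOnSmi/BranchOnNotSmi become JumpIfSmi/JumpIfNotSmi here, but the underlying test is unchanged: a smi carries a zero tag in its low bit, so a single tst against the tag mask decides the branch. A host-side model of that check (constants match V8's 32-bit tagging scheme; the helper names are illustrative):

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kSmiTagSize = 1;
    constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // 0x1

    // JumpIfSmi compiles to: tst reg, #kSmiTagMask; b eq, target.
    inline bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
    inline intptr_t ToSmi(int v) { return static_cast<intptr_t>(v) << kSmiTagSize; }
    inline int SmiValue(intptr_t smi) { return static_cast<int>(smi >> kSmiTagSize); }

    int main() {
      assert(IsSmi(ToSmi(42)) && SmiValue(ToSmi(42)) == 42);
      assert(!IsSmi(0x1235));  // a tagged heap pointer has the low bit set
    }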

deps/v8/src/arm/jump-target-arm.cc

@@ -76,7 +76,7 @@ void JumpTarget::DoJump() {
 }
 
-void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+void JumpTarget::DoBranch(Condition cond, Hint ignored) {
   ASSERT(cgen()->has_valid_frame());
 
   if (entry_frame_set_) {
@@ -86,7 +86,7 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
       ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
     }
     // We have an expected frame to merge to on the backward edge.
-    cgen()->frame()->MergeTo(&entry_frame_, cc);
+    cgen()->frame()->MergeTo(&entry_frame_, cond);
   } else {
     // Clone the current frame to use as the expected one at the target.
     set_entry_frame(cgen()->frame());
@@ -98,8 +98,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
     // frame with less precise type info branches to them.
     ASSERT(direction_ != FORWARD_ONLY);
   }
-  __ b(cc, &entry_label_);
-  if (cc == al) {
+  __ b(cond, &entry_label_);
+  if (cond == al) {
     cgen()->DeleteFrame();
   }
 }
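
DoBranch treats al (always) specially because an unconditional branch ends the current frame. For every other condition, helpers in the style of NegateCondition rely on ARM condition codes coming in complementary pairs whose encodings differ only in the low bit. A sketch of that property (encodings follow the ARM architecture manual; the enum lists only the codes used here):

    #include <cassert>

    enum Condition { eq = 0, ne = 1, ge = 10, lt = 11, al = 14 };

    inline Condition NegateCondition(Condition cond) {
      // Not meaningful for al (always); V8 asserts cond != al as well.
      return static_cast<Condition>(cond ^ 1);
    }

    int main() {
      assert(NegateCondition(eq) == ne);
      assert(NegateCondition(lt) == ge);
      assert(NegateCondition(ge) == lt);
    }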

deps/v8/src/arm/lithium-arm.cc

@@ -820,6 +820,7 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
+
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
   ASSERT(is_building());
   current_block_ = block;
@@ -1018,11 +1019,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
     HIsObject* compare = HIsObject::cast(v);
     ASSERT(compare->value()->representation().IsTagged());
 
-    LOperand* temp1 = TempRegister();
-    LOperand* temp2 = TempRegister();
-    return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
-                                  temp1,
-                                  temp2);
+    LOperand* temp = TempRegister();
+    return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
   } else if (v->IsCompareJSObjectEq()) {
     HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
     return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1030,8 +1028,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
   } else if (v->IsInstanceOf()) {
     HInstanceOf* instance_of = HInstanceOf::cast(v);
     LInstruction* result =
-        new LInstanceOfAndBranch(Use(instance_of->left()),
-                                 Use(instance_of->right()));
+        new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
+                                 UseFixed(instance_of->right(), r1));
     return MarkAsCall(result, instr);
   } else if (v->IsTypeofIs()) {
     HTypeofIs* typeof_is = HTypeofIs::cast(v);
@@ -1133,7 +1131,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
     case kMathAbs:
       return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
     case kMathFloor:
-      return AssignEnvironment(DefineAsRegister(result));
+      return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
     case kMathSqrt:
      return DefineSameAsFirst(result);
    case kMathRound:
@@ -1313,8 +1311,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1404,7 +1402,7 @@ LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new LIsObject(value, TempRegister()));
+  return DefineAsRegister(new LIsObject(value));
 }
@@ -1604,7 +1602,14 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+  if (instr->check_hole_value()) {
+    LOperand* temp = TempRegister();
+    LOperand* value = UseRegister(instr->value());
+    return AssignEnvironment(new LStoreGlobal(value, temp));
+  } else {
+    LOperand* value = UseRegisterAtStart(instr->value());
+    return new LStoreGlobal(value, NULL);
+  }
 }
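
The new DoStoreGlobal picks different operand lifetimes depending on whether the store must first check for the hole value: the checked path keeps the value alive past the start of the instruction (UseRegister rather than UseRegisterAtStart), takes a scratch register, and gets an environment so it can deoptimize. A toy model of that decision (names are illustrative, not the real Lithium builder API):

    #include <cstdio>

    enum class Use { kRegisterAtStart, kRegister };

    struct StoreGlobalPlan {
      Use value;
      bool needs_temp;
      bool can_deoptimize;
    };

    StoreGlobalPlan PlanStoreGlobal(bool check_hole_value) {
      if (check_hole_value) {
        // The value must survive the hole check, and the check may deopt.
        return {Use::kRegister, true, true};
      }
      return {Use::kRegisterAtStart, false, false};
    }

    int main() {
      StoreGlobalPlan plan = PlanStoreGlobal(true);
      std::printf("temp=%d deopt=%d\n", plan.needs_temp, plan.can_deoptimize);
    }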

deps/v8/src/arm/lithium-arm.h

@@ -734,9 +734,8 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
 class LIsObject: public LTemplateInstruction<1, 1, 1> {
  public:
-  LIsObject(LOperand* value, LOperand* temp) {
+  explicit LIsObject(LOperand* value) {
     inputs_[0] = value;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
@@ -745,10 +744,9 @@ class LIsObject: public LTemplateInstruction<1, 1, 1> {
 class LIsObjectAndBranch: public LControlInstruction<1, 2> {
  public:
-  LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
-    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@@ -1256,10 +1254,11 @@ class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
 };
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
  public:
-  explicit LStoreGlobal(LOperand* value) {
+  LStoreGlobal(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
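
LTemplateInstruction<R, I, T> encodes the operand counts in the type: R results, I inputs, and T temps held in fixed-size arrays. That is why LStoreGlobal moves from <0, 1, 0> to <0, 1, 1> the moment it gains a temp register. A self-contained sketch of the convention (simplified types, not V8's actual EmbeddedContainer machinery):

    #include <array>
    #include <cstdio>

    struct LOperand {};

    template <int R, int I, int T>
    struct LTemplateInstruction {
      std::array<LOperand*, R> results_;
      std::array<LOperand*, I> inputs_;
      std::array<LOperand*, T> temps_;
    };

    struct LStoreGlobal : LTemplateInstruction<0, 1, 1> {
      LStoreGlobal(LOperand* value, LOperand* temp) {
        inputs_[0] = value;
        temps_[0] = temp;  // may be NULL when no hole check is needed
      }
    };

    int main() {
      LOperand value, temp;
      LStoreGlobal store(&value, &temp);
      std::printf("inputs=%zu temps=%zu\n",
                  store.inputs_.size(), store.temps_.size());
    }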

deps/v8/src/arm/lithium-codegen-arm.cc

@@ -661,7 +661,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
     return;
   }
 
-  if (cc == no_condition) {
+  if (cc == kNoCondition) {
     if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
@@ -736,37 +736,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
 }
 
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    int deoptimization_index) {
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-                                                    deoptimization_index);
+      kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
       safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
     }
   }
+  if (kind & Safepoint::kWithRegisters) {
+    // Register cp always contains a pointer to the context.
+    safepoint.DefinePointerRegister(cp);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
                                             int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegisters(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register cp always contains a pointer to the context.
-  safepoint.DefinePointerRegister(cp);
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+                  deoptimization_index);
 }
@@ -774,20 +777,8 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles(
     LPointerMap* pointers,
     int arguments,
     int deoptimization_index) {
-  const ZoneList<LOperand*>* operands = pointers->operands();
-  Safepoint safepoint =
-      safepoints_.DefineSafepointWithRegistersAndDoubles(
-          masm(), arguments, deoptimization_index);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister()) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  // Register cp always contains a pointer to the context.
-  safepoint.DefinePointerRegister(cp);
+  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
+                  deoptimization_index);
 }
@ -1080,7 +1071,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(deferred->exit()); __ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize. // If the result in r0 is a Smi, untag it, else deoptimize.
__ BranchOnNotSmi(result, &deoptimize); __ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result); __ SmiUntag(result);
__ b(al, &done); __ b(al, &done);
@ -1160,7 +1151,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(deferred->exit()); __ bind(deferred->exit());
// If the result in r0 is a Smi, untag it, else deoptimize. // If the result in r0 is a Smi, untag it, else deoptimize.
__ BranchOnNotSmi(result, &deoptimize); __ JumpIfNotSmi(result, &deoptimize);
__ SmiUntag(result); __ SmiUntag(result);
__ b(&done); __ b(&done);
@ -1216,7 +1207,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(ne, &done); __ b(ne, &done);
if (instr->InputAt(1)->IsConstantOperand()) { if (instr->InputAt(1)->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) { if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
DeoptimizeIf(no_condition, instr->environment()); DeoptimizeIf(kNoCondition, instr->environment());
} }
} else { } else {
// Test the non-zero operand for negative sign. // Test the non-zero operand for negative sign.
@ -1483,7 +1474,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (r.IsInteger32()) { if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0)); Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0)); __ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, nz); EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) { } else if (r.IsDouble()) {
DoubleRegister reg = ToDoubleRegister(instr->InputAt(0)); DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0(); Register scratch = scratch0();
@ -1541,7 +1532,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CallStub(&stub); __ CallStub(&stub);
__ cmp(reg, Operand(0)); __ cmp(reg, Operand(0));
__ ldm(ia_w, sp, saved_regs); __ ldm(ia_w, sp, saved_regs);
EmitBranch(true_block, false_block, nz); EmitBranch(true_block, false_block, ne);
} }
} }
} }
@ -1593,7 +1584,7 @@ void LCodeGen::DoGoto(LGoto* instr) {
Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition; Condition cond = kNoCondition;
switch (op) { switch (op) {
case Token::EQ: case Token::EQ:
case Token::EQ_STRICT: case Token::EQ_STRICT:
@ -1730,18 +1721,62 @@ Condition LCodeGen::EmitIsObject(Register input,
Register temp2, Register temp2,
Label* is_not_object, Label* is_not_object,
Label* is_object) { Label* is_object) {
Abort("EmitIsObject unimplemented."); __ JumpIfSmi(input, is_not_object);
return ne;
__ LoadRoot(temp1, Heap::kNullValueRootIndex);
__ cmp(input, temp1);
__ b(eq, is_object);
// Load map.
__ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
__ tst(temp2, Operand(1 << Map::kIsUndetectable));
__ b(ne, is_not_object);
// Load instance type and check that it is in object type range.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
__ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, is_not_object);
__ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
return le;
} }
void LCodeGen::DoIsObject(LIsObject* instr) { void LCodeGen::DoIsObject(LIsObject* instr) {
Abort("DoIsObject unimplemented."); Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register temp = scratch0();
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
__ b(true_cond, &is_true);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
} }
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Abort("DoIsObjectAndBranch unimplemented."); Register reg = ToRegister(instr->InputAt(0));
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond =
EmitIsObject(reg, temp1, temp2, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
} }
@ -1956,7 +1991,16 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
Abort("DoInstanceOfAndBranch unimplemented."); ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ tst(r0, Operand(r0));
EmitBranch(true_block, false_block, eq);
} }
@ -1989,7 +2033,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
ASSERT(result.is(r0)); ASSERT(result.is(r0));
// A Smi is not instance of anything. // A Smi is not instance of anything.
__ BranchOnSmi(object, &false_result); __ JumpIfSmi(object, &false_result);
// This is the inlined call site instanceof cache. The two occurences of the // This is the inlined call site instanceof cache. The two occurences of the
// hole value will be patched to the last map/result pair generated by the // hole value will be patched to the last map/result pair generated by the
@ -2092,7 +2136,7 @@ static Condition ComputeCompareCondition(Token::Value op) {
return ge; return ge;
default: default:
UNREACHABLE(); UNREACHABLE();
return no_condition; return kNoCondition;
} }
} }
@ -2151,8 +2195,26 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->InputAt(0)); Register value = ToRegister(instr->InputAt(0));
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); Register scratch = scratch0();
__ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
// Load the cell.
__ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
// If the cell we are storing to contains the hole it could have
// been deleted from the property dictionary. In that case, we need
// to update the property details in the property dictionary to mark
// it as no longer deleted.
if (instr->hydrogen()->check_hole_value()) {
Register scratch2 = ToRegister(instr->TempAt(0));
__ ldr(scratch2,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch2, ip);
DeoptimizeIf(eq, instr->environment());
}
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
} }
@ -2565,7 +2627,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
new DeferredMathAbsTaggedHeapNumber(this, instr); new DeferredMathAbsTaggedHeapNumber(this, instr);
Register input = ToRegister(instr->InputAt(0)); Register input = ToRegister(instr->InputAt(0));
// Smi check. // Smi check.
__ BranchOnNotSmi(input, deferred->entry()); __ JumpIfNotSmi(input, deferred->entry());
// If smi, handle it directly. // If smi, handle it directly.
EmitIntegerMathAbs(instr); EmitIntegerMathAbs(instr);
__ bind(deferred->exit()); __ bind(deferred->exit());
@ -3512,7 +3574,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Label* false_label, Label* false_label,
Register input, Register input,
Handle<String> type_name) { Handle<String> type_name) {
Condition final_branch_condition = no_condition; Condition final_branch_condition = kNoCondition;
Register scratch = scratch0(); Register scratch = scratch0();
if (type_name->Equals(Heap::number_symbol())) { if (type_name->Equals(Heap::number_symbol())) {
__ tst(input, Operand(kSmiTagMask)); __ tst(input, Operand(kSmiTagMask));
@ -3597,7 +3659,7 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) { void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
DeoptimizeIf(no_condition, instr->environment()); DeoptimizeIf(kNoCondition, instr->environment());
} }

4
deps/v8/src/arm/lithium-codegen-arm.h

@ -223,6 +223,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSqrt(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information. // Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers, void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments, int arguments,

144
deps/v8/src/arm/macro-assembler-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -318,7 +318,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
CheckConstPool(true, true); CheckConstPool(true, true);
add(pc, pc, Operand(index, add(pc, pc, Operand(index,
LSL, LSL,
assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize)); Instruction::kInstrSizeLog2 - kSmiTagSize));
BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize); BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
nop(); // Jump table alignment. nop(); // Jump table alignment.
for (int i = 0; i < targets.length(); i++) { for (int i = 0; i < targets.length(); i++) {
@ -369,12 +369,12 @@ void MacroAssembler::RecordWriteHelper(Register object,
void MacroAssembler::InNewSpace(Register object, void MacroAssembler::InNewSpace(Register object,
Register scratch, Register scratch,
Condition cc, Condition cond,
Label* branch) { Label* branch) {
ASSERT(cc == eq || cc == ne); ASSERT(cond == eq || cond == ne);
and_(scratch, object, Operand(ExternalReference::new_space_mask())); and_(scratch, object, Operand(ExternalReference::new_space_mask()));
cmp(scratch, Operand(ExternalReference::new_space_start())); cmp(scratch, Operand(ExternalReference::new_space_start()));
b(cc, branch); b(cond, branch);
} }
@ -615,37 +615,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles) { void MacroAssembler::EnterExitFrame(bool save_doubles) {
// r0 is argc. // Compute the argv pointer in a callee-saved register.
// Compute callee's stack pointer before making changes and save it as add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
// ip register so that it is restored as sp register on exit, thereby sub(r6, r6, Operand(kPointerSize));
// popping the args.
// Setup the frame structure on the stack.
// ip = sp + kPointerSize * #args; ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
add(ip, sp, Operand(r0, LSL, kPointerSizeLog2)); ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
// Compute the argv pointer and keep it in a callee-saved register. Push(lr, fp);
sub(r6, ip, Operand(kPointerSize));
// Prepare the stack to be aligned when calling into C. After this point there
// are 5 pushes before the call into C, so the stack needs to be aligned after
// 5 pushes.
int frame_alignment = ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment != kPointerSize) {
// The following code needs to be more general if this assert does not hold.
ASSERT(frame_alignment == 2 * kPointerSize);
// With 5 pushes left the frame must be unaligned at this point.
mov(r7, Operand(Smi::FromInt(0)));
tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
push(r7, eq); // Push if aligned to make it unaligned.
}
// Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // Setup new frame pointer. mov(fp, Operand(sp)); // Setup new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
if (FLAG_debug_code) {
mov(ip, Operand(0));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
mov(ip, Operand(CodeObject())); mov(ip, Operand(CodeObject()));
push(ip); // Accessed from ExitFrame::code_slot. str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top. // Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@ -659,25 +646,30 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
// Optionally save all double registers. // Optionally save all double registers.
if (save_doubles) { if (save_doubles) {
// TODO(regis): Use vstrm instruction. sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
// The stack alignment code above made sp unaligned, so add space for one const int offset = -2 * kPointerSize;
// more double register and use aligned addresses.
ASSERT(kDoubleSize == frame_alignment);
// Mark the frame as containing doubles by pushing a non-valid return
// address, i.e. 0.
ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
mov(ip, Operand(0)); // Marker and alignment word.
push(ip);
int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
sub(sp, sp, Operand(space));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i); DwVfpRegister reg = DwVfpRegister::from_code(i);
vstr(reg, sp, i * kDoubleSize + kPointerSize); vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
} }
// Note that d0 will be accessible at fp - 2*kPointerSize - // Note that d0 will be accessible at
// DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
// alignment word were pushed after the fp. // since the sp slot and code slot were pushed after the fp.
}
// Reserve place for the return address and align the frame preparing for
// calling the runtime function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand(kPointerSize));
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
} }
// Set the exit frame sp value to point just before the return address
// location.
add(ip, sp, Operand(kPointerSize));
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
} }
@ -715,12 +707,10 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles) { void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all double registers. // Optionally restore all double registers.
if (save_doubles) { if (save_doubles) {
// TODO(regis): Use vldrm instruction.
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i); DwVfpRegister reg = DwVfpRegister::from_code(i);
// Register d15 is just below the marker. const int offset = -2 * kPointerSize;
const int offset = ExitFrameConstants::kMarkerOffset; vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
} }
} }
@ -736,9 +726,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
str(r3, MemOperand(ip)); str(r3, MemOperand(ip));
#endif #endif
// Pop the arguments, restore registers, and return. // Tear down the exit frame, pop the arguments, and return. Callee-saved
mov(sp, Operand(fp)); // respect ABI stack constraint // register r4 still holds argc.
ldm(ia, sp, fp.bit() | sp.bit() | pc.bit()); mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
mov(pc, lr);
} }
@ -933,7 +926,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
tst(scratch, Operand(kIsNotStringMask)); tst(scratch, Operand(kIsNotStringMask));
b(nz, fail); b(ne, fail);
} }
@ -1392,7 +1385,7 @@ void MacroAssembler::CheckMap(Register obj,
Label* fail, Label* fail,
bool is_heap_object) { bool is_heap_object) {
if (!is_heap_object) { if (!is_heap_object) {
BranchOnSmi(obj, fail); JumpIfSmi(obj, fail);
} }
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
mov(ip, Operand(map)); mov(ip, Operand(map));
@ -1407,7 +1400,7 @@ void MacroAssembler::CheckMap(Register obj,
Label* fail, Label* fail,
bool is_heap_object) { bool is_heap_object) {
if (!is_heap_object) { if (!is_heap_object) {
BranchOnSmi(obj, fail); JumpIfSmi(obj, fail);
} }
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(ip, index); LoadRoot(ip, index);
@ -1421,7 +1414,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
Register scratch, Register scratch,
Label* miss) { Label* miss) {
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
BranchOnSmi(function, miss); JumpIfSmi(function, miss);
// Check that the function really is a function. Load map into result reg. // Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@ -1520,7 +1513,7 @@ void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
Label done; Label done;
if ((flags & OBJECT_NOT_SMI) == 0) { if ((flags & OBJECT_NOT_SMI) == 0) {
Label not_smi; Label not_smi;
BranchOnNotSmi(object, &not_smi); JumpIfNotSmi(object, &not_smi);
// Remove smi tag and convert to double. // Remove smi tag and convert to double.
mov(scratch1, Operand(object, ASR, kSmiTagSize)); mov(scratch1, Operand(object, ASR, kSmiTagSize));
vmov(scratch3, scratch1); vmov(scratch3, scratch1);
@ -1813,9 +1806,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
} }
void MacroAssembler::Assert(Condition cc, const char* msg) { void MacroAssembler::Assert(Condition cond, const char* msg) {
if (FLAG_debug_code) if (FLAG_debug_code)
Check(cc, msg); Check(cond, msg);
} }
@ -1848,9 +1841,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
} }
void MacroAssembler::Check(Condition cc, const char* msg) { void MacroAssembler::Check(Condition cond, const char* msg) {
Label L; Label L;
b(cc, &L); b(cond, &L);
Abort(msg); Abort(msg);
// will not return here // will not return here
bind(&L); bind(&L);
@ -1946,7 +1939,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
void MacroAssembler::JumpIfNotBothSmi(Register reg1, void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2, Register reg2,
Label* on_not_both_smi) { Label* on_not_both_smi) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask)); tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), eq); tst(reg2, Operand(kSmiTagMask), eq);
b(ne, on_not_both_smi); b(ne, on_not_both_smi);
@ -1956,7 +1949,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
void MacroAssembler::JumpIfEitherSmi(Register reg1, void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2, Register reg2,
Label* on_either_smi) { Label* on_either_smi) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(reg1, Operand(kSmiTagMask)); tst(reg1, Operand(kSmiTagMask));
tst(reg2, Operand(kSmiTagMask), ne); tst(reg2, Operand(kSmiTagMask), ne);
b(eq, on_either_smi); b(eq, on_either_smi);
@ -1964,19 +1957,30 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
void MacroAssembler::AbortIfSmi(Register object) { void MacroAssembler::AbortIfSmi(Register object) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask)); tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is a smi"); Assert(ne, "Operand is a smi");
} }
void MacroAssembler::AbortIfNotSmi(Register object) { void MacroAssembler::AbortIfNotSmi(Register object) {
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask)); tst(object, Operand(kSmiTagMask));
Assert(eq, "Operand is not smi"); Assert(eq, "Operand is not smi");
} }
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number) {
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
cmp(scratch, heap_number_map);
b(ne, on_not_heap_number);
}
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first, Register first,
Register second, Register second,
@ -2003,7 +2007,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
Register scratch2, Register scratch2,
Label* failure) { Label* failure) {
// Check that neither is a smi. // Check that neither is a smi.
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second)); and_(scratch1, first, Operand(second));
tst(scratch1, Operand(kSmiTagMask)); tst(scratch1, Operand(kSmiTagMask));
b(eq, failure); b(eq, failure);

42
deps/v8/src/arm/macro-assembler-arm.h

@ -139,7 +139,7 @@ class MacroAssembler: public Assembler {
// scratch can be object itself, but it will be clobbered. // scratch can be object itself, but it will be clobbered.
void InNewSpace(Register object, void InNewSpace(Register object,
Register scratch, Register scratch,
Condition cc, // eq for new space, ne otherwise Condition cond, // eq for new space, ne otherwise
Label* branch); Label* branch);
@ -545,16 +545,6 @@ class MacroAssembler: public Assembler {
} }
inline void BranchOnSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
}
// Generates code for reporting that an illegal operation has // Generates code for reporting that an illegal operation has
// occurred. // occurred.
void IllegalOperation(int num_arguments); void IllegalOperation(int num_arguments);
@ -695,14 +685,14 @@ class MacroAssembler: public Assembler {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Debugging // Debugging
// Calls Abort(msg) if the condition cc is not satisfied. // Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable. // Use --debug_code to enable.
void Assert(Condition cc, const char* msg); void Assert(Condition cond, const char* msg);
void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index); void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
void AssertFastElements(Register elements); void AssertFastElements(Register elements);
// Like Assert(), but always enabled. // Like Assert(), but always enabled.
void Check(Condition cc, const char* msg); void Check(Condition cond, const char* msg);
// Print a message to stdout and abort execution. // Print a message to stdout and abort execution.
void Abort(const char* msg); void Abort(const char* msg);
@ -719,6 +709,9 @@ class MacroAssembler: public Assembler {
void SmiTag(Register reg, SBit s = LeaveCC) { void SmiTag(Register reg, SBit s = LeaveCC) {
add(reg, reg, Operand(reg), s); add(reg, reg, Operand(reg), s);
} }
void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
add(dst, src, Operand(src), s);
}
// Try to convert int32 to smi. If the value is to large, preserve // Try to convert int32 to smi. If the value is to large, preserve
// the original value and jump to not_a_smi. Destroys scratch and // the original value and jump to not_a_smi. Destroys scratch and
@ -733,7 +726,20 @@ class MacroAssembler: public Assembler {
void SmiUntag(Register reg) { void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize)); mov(reg, Operand(reg, ASR, kSmiTagSize));
} }
void SmiUntag(Register dst, Register src) {
mov(dst, Operand(src, ASR, kSmiTagSize));
}
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
}
// Jump if either of the registers contain a non-smi. // Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
// Jump if either of the registers contain a smi. // Jump if either of the registers contain a smi.
@ -743,6 +749,14 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object); void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object); void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
// HeapNumber utilities
void JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number);
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// String utilities // String utilities

478
deps/v8/src/arm/simulator-arm.cc

File diff suppressed because it is too large

84
deps/v8/src/arm/simulator-arm.h

@ -80,8 +80,8 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h" #include "constants-arm.h"
#include "hashmap.h" #include "hashmap.h"
namespace assembler { namespace v8 {
namespace arm { namespace internal {
class CachePage { class CachePage {
public: public:
@ -203,11 +203,11 @@ class Simulator {
}; };
// Unsupported instructions use Format to print an error and stop execution. // Unsupported instructions use Format to print an error and stop execution.
void Format(Instr* instr, const char* format); void Format(Instruction* instr, const char* format);
// Checks if the current instruction should be executed based on its // Checks if the current instruction should be executed based on its
// condition bits. // condition bits.
bool ConditionallyExecute(Instr* instr); bool ConditionallyExecute(Instruction* instr);
// Helper functions to set the conditional flags in the architecture state. // Helper functions to set the conditional flags in the architecture state.
void SetNZFlags(int32_t val); void SetNZFlags(int32_t val);
@ -225,13 +225,13 @@ class Simulator {
void Copy_FPSCR_to_APSR(); void Copy_FPSCR_to_APSR();
// Helper functions to decode common "addressing" modes // Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out); int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out); int32_t GetImm(Instruction* instr, bool* carry_out);
void HandleRList(Instr* instr, bool load); void HandleRList(Instruction* instr, bool load);
void SoftwareInterrupt(Instr* instr); void SoftwareInterrupt(Instruction* instr);
// Stop helper functions. // Stop helper functions.
inline bool isStopInstruction(Instr* instr); inline bool isStopInstruction(Instruction* instr);
inline bool isWatchedStop(uint32_t bkpt_code); inline bool isWatchedStop(uint32_t bkpt_code);
inline bool isEnabledStop(uint32_t bkpt_code); inline bool isEnabledStop(uint32_t bkpt_code);
inline void EnableStop(uint32_t bkpt_code); inline void EnableStop(uint32_t bkpt_code);
@ -245,41 +245,42 @@ class Simulator {
inline void WriteB(int32_t addr, uint8_t value); inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value); inline void WriteB(int32_t addr, int8_t value);
inline uint16_t ReadHU(int32_t addr, Instr* instr); inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instr* instr); inline int16_t ReadH(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value. // Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instr* instr); inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instr* instr); inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instr* instr); inline int ReadW(int32_t addr, Instruction* instr);
inline void WriteW(int32_t addr, int value, Instr* instr); inline void WriteW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr); int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2); void WriteDW(int32_t addr, int32_t value1, int32_t value2);
// Executing is handled based on the instruction type. // Executing is handled based on the instruction type.
void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one // Both type 0 and type 1 rolled into one.
void DecodeType2(Instr* instr); void DecodeType01(Instruction* instr);
void DecodeType3(Instr* instr); void DecodeType2(Instruction* instr);
void DecodeType4(Instr* instr); void DecodeType3(Instruction* instr);
void DecodeType5(Instr* instr); void DecodeType4(Instruction* instr);
void DecodeType6(Instr* instr); void DecodeType5(Instruction* instr);
void DecodeType7(Instr* instr); void DecodeType6(Instruction* instr);
void DecodeType7(Instruction* instr);
// Support for VFP. // Support for VFP.
void DecodeTypeVFP(Instr* instr); void DecodeTypeVFP(Instruction* instr);
void DecodeType6CoprocessorIns(Instr* instr); void DecodeType6CoprocessorIns(Instruction* instr);
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr); void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instr* instr); void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instr* instr); void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr); void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction. // Executes one instruction.
void InstructionDecode(Instr* instr); void InstructionDecode(Instruction* instr);
// ICache. // ICache.
static void CheckICache(Instr* instr); static void CheckICache(Instruction* instr);
static void FlushOnePage(intptr_t start, int size); static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page); static CachePage* GetCachePage(void* page);
@ -330,8 +331,8 @@ class Simulator {
static v8::internal::HashMap* i_cache_; static v8::internal::HashMap* i_cache_;
// Registered breakpoints. // Registered breakpoints.
Instr* break_pc_; Instruction* break_pc_;
instr_t break_instr_; Instr break_instr_;
// A stop is watched if its code is less than kNumOfWatchedStops. // A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature. // Only watched stops support enabling/disabling and the counter feature.
@ -344,27 +345,22 @@ class Simulator {
// instruction, if bit 31 of watched_stops[code].count is unset. // instruction, if bit 31 of watched_stops[code].count is unset.
// The value watched_stops[code].count & ~(1 << 31) indicates how many times // The value watched_stops[code].count & ~(1 << 31) indicates how many times
// the breakpoint was hit or gone through. // the breakpoint was hit or gone through.
struct StopCoundAndDesc { struct StopCountAndDesc {
uint32_t count; uint32_t count;
char* desc; char* desc;
}; };
StopCoundAndDesc watched_stops[kNumOfWatchedStops]; StopCountAndDesc watched_stops[kNumOfWatchedStops];
}; };
} } // namespace assembler::arm
namespace v8 {
namespace internal {
// When running with the simulator transition into simulated execution at this // When running with the simulator transition into simulated execution at this
// point. // point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(assembler::arm::Simulator::current()->Call( \ reinterpret_cast<Object*>(Simulator::current()->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
assembler::arm::Simulator::current()->Call( \ Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6) FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
@ -380,16 +376,16 @@ namespace internal {
class SimulatorStack : public v8::internal::AllStatic { class SimulatorStack : public v8::internal::AllStatic {
public: public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit(); return Simulator::current()->StackLimit();
} }
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
assembler::arm::Simulator* sim = assembler::arm::Simulator::current(); Simulator* sim = Simulator::current();
return sim->PushAddress(try_catch_address); return sim->PushAddress(try_catch_address);
} }
static inline void UnregisterCTryCatch() { static inline void UnregisterCTryCatch() {
assembler::arm::Simulator::current()->PopAddress(); Simulator::current()->PopAddress();
} }
}; };

108
deps/v8/src/arm/stub-cache-arm.cc

@ -370,27 +370,31 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
Register receiver, Register receiver,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* miss) { Label* miss,
bool support_wrappers) {
Label check_wrapper; Label check_wrapper;
// Check if the object is a string leaving the instance type in the // Check if the object is a string leaving the instance type in the
// scratch1 register. // scratch1 register.
GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper); GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
support_wrappers ? &check_wrapper : miss);
// Load length directly from the string. // Load length directly from the string.
__ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
__ Ret(); __ Ret();
// Check if the object is a JSValue wrapper. if (support_wrappers) {
__ bind(&check_wrapper); // Check if the object is a JSValue wrapper.
__ cmp(scratch1, Operand(JS_VALUE_TYPE)); __ bind(&check_wrapper);
__ b(ne, miss); __ cmp(scratch1, Operand(JS_VALUE_TYPE));
__ b(ne, miss);
// Unwrap the value and check if the wrapped value is a string. // Unwrap the value and check if the wrapped value is a string.
__ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
__ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
__ Ret(); __ Ret();
}
} }
@ -521,7 +525,7 @@ static void GenerateCallFunction(MacroAssembler* masm,
// ----------------------------------- // -----------------------------------
// Check that the function really is a function. // Check that the function really is a function.
__ BranchOnSmi(r1, miss); __ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss); __ b(ne, miss);
@ -660,7 +664,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss); __ JumpIfSmi(receiver, miss);
CallOptimization optimization(lookup); CallOptimization optimization(lookup);
@ -1194,17 +1198,16 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
} }
bool StubCompiler::GenerateLoadCallback(JSObject* object, MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder, JSObject* holder,
Register receiver, Register receiver,
Register name_reg, Register name_reg,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Register scratch3, Register scratch3,
AccessorInfo* callback, AccessorInfo* callback,
String* name, String* name,
Label* miss, Label* miss) {
Failure** failure) {
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask)); __ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss); __ b(eq, miss);
@ -1225,7 +1228,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(load_callback_property, 5, 1); __ TailCallExternalReference(load_callback_property, 5, 1);
return true; return Heap::undefined_value(); // Success.
} }
@ -1243,7 +1246,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss); __ JumpIfSmi(receiver, miss);
// So far the most popular follow ups for interceptor loads are FIELD // So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added // and CALLBACKS, so inline only them, other cases may be added
@ -1511,7 +1514,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ ldr(receiver, MemOperand(sp, argc * kPointerSize)); __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, &miss); __ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed. // Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), receiver, CheckPrototypes(JSObject::cast(object), receiver,
@ -1565,7 +1568,7 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
__ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
// Check for a smi. // Check for a smi.
__ BranchOnNotSmi(r4, &with_write_barrier); __ JumpIfNotSmi(r4, &with_write_barrier);
__ bind(&exit); __ bind(&exit);
__ Drop(argc + 1); __ Drop(argc + 1);
__ Ret(); __ Ret();
@ -1672,7 +1675,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
__ ldr(receiver, MemOperand(sp, argc * kPointerSize)); __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, &miss); __ JumpIfSmi(receiver, &miss);
// Check that the maps haven't changed. // Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), CheckPrototypes(JSObject::cast(object),
@ -2009,7 +2012,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ ldr(r1, MemOperand(sp, 1 * kPointerSize)); __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ BranchOnSmi(r1, &miss); __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss); &miss);
@ -2168,7 +2171,7 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
// Check if the argument is a smi. // Check if the argument is a smi.
Label not_smi; Label not_smi;
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ BranchOnNotSmi(r0, &not_smi); __ JumpIfNotSmi(r0, &not_smi);
// Do bitwise not or do nothing depending on the sign of the // Do bitwise not or do nothing depending on the sign of the
// argument. // argument.
@ -2646,9 +2649,18 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ cmp(r3, Operand(Handle<Map>(object->map()))); __ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss); __ b(ne, &miss);
// Check that the value in the cell is not the hole. If it is, this
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
__ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ cmp(r5, r6);
__ b(eq, &miss);
// Store the value in the cell. // Store the value in the cell.
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell))); __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
__ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
__ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3); __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
__ Ret(); __ Ret();
@ -2738,12 +2750,11 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
// ----------------------------------- // -----------------------------------
Label miss; Label miss;
Failure* failure = Failure::InternalError(); MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name, &miss);
callback, name, &miss, &failure); if (result->IsFailure()) {
if (!success) {
miss.Unuse(); miss.Unuse();
return failure; return result;
} }
__ bind(&miss); __ bind(&miss);
@ -2890,12 +2901,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
__ cmp(r0, Operand(Handle<String>(name))); __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss); __ b(ne, &miss);
Failure* failure = Failure::InternalError(); MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r4, callback, name, &miss);
callback, name, &miss, &failure); if (result->IsFailure()) {
if (!success) {
miss.Unuse(); miss.Unuse();
return failure; return result;
} }
__ bind(&miss); __ bind(&miss);
@ -2995,7 +3005,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name))); __ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss); __ b(ne, &miss);
GenerateLoadStringLength(masm(), r1, r2, r3, &miss); GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
__ bind(&miss); __ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3); __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
@ -3361,10 +3371,10 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
Register receiver = r1; Register receiver = r1;
// Check that the object isn't a smi // Check that the object isn't a smi
__ BranchOnSmi(receiver, &slow); __ JumpIfSmi(receiver, &slow);
// Check that the key is a smi. // Check that the key is a smi.
__ BranchOnNotSmi(key, &slow); __ JumpIfNotSmi(key, &slow);
// Check that the object is a JS object. Load map into r2. // Check that the object is a JS object. Load map into r2.
__ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
@ -3645,7 +3655,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// r3 mostly holds the elements array or the destination external array. // r3 mostly holds the elements array or the destination external array.
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ BranchOnSmi(receiver, &slow); __ JumpIfSmi(receiver, &slow);
// Check that the object is a JS object. Load map into r3. // Check that the object is a JS object. Load map into r3.
__ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
@ -3658,7 +3668,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
__ b(ne, &slow); __ b(ne, &slow);
// Check that the key is a smi. // Check that the key is a smi.
__ BranchOnNotSmi(key, &slow); __ JumpIfNotSmi(key, &slow);
// Check that the elements array is the appropriate type of ExternalArray. // Check that the elements array is the appropriate type of ExternalArray.
__ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@ -3678,7 +3688,7 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
// runtime for all other kinds of values. // runtime for all other kinds of values.
// r3: external array. // r3: external array.
// r4: key (integer). // r4: key (integer).
__ BranchOnNotSmi(value, &check_heap_number); __ JumpIfNotSmi(value, &check_heap_number);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));

3
deps/v8/src/assembler.h

@ -185,7 +185,6 @@ class RelocInfo BASE_EMBEDDED {
DEBUG_BREAK, // Code target for the debugger statement. DEBUG_BREAK, // Code target for the debugger statement.
CODE_TARGET, // Code target which is not any of the above. CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT, EMBEDDED_OBJECT,
GLOBAL_PROPERTY_CELL, GLOBAL_PROPERTY_CELL,
// Everything after runtime_entry (inclusive) is not GC'ed. // Everything after runtime_entry (inclusive) is not GC'ed.
@ -203,7 +202,7 @@ class RelocInfo BASE_EMBEDDED {
NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
NONE, // never recorded NONE, // never recorded
LAST_CODE_ENUM = CODE_TARGET, LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_OBJECT LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
}; };

45
deps/v8/src/ast.cc

@ -239,12 +239,19 @@ void ObjectLiteral::CalculateEmitStore() {
HashMap* table; HashMap* table;
void* key; void* key;
uint32_t index; uint32_t index;
Smi* smi_key_location;
if (handle->IsSymbol()) { if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle)); Handle<String> name(String::cast(*handle));
ASSERT(!name->AsArrayIndex(&index)); if (name->AsArrayIndex(&index)) {
key = name.location(); smi_key_location = Smi::FromInt(index);
hash = name->Hash(); key = &smi_key_location;
table = &properties; hash = index;
table = &elements;
} else {
key = name.location();
hash = name->Hash();
table = &properties;
}
} else if (handle->ToArrayIndex(&index)) { } else if (handle->ToArrayIndex(&index)) {
key = handle.location(); key = handle.location();
hash = index; hash = index;
@ -514,6 +521,8 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
if (key()->IsPropertyName()) { if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) { if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
is_array_length_ = true; is_array_length_ = true;
} else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
is_string_length_ = true;
} else if (oracle->LoadIsBuiltin(this, } else if (oracle->LoadIsBuiltin(this,
Builtins::LoadIC_FunctionPrototype)) { Builtins::LoadIC_FunctionPrototype)) {
is_function_prototype_ = true; is_function_prototype_ = true;
@ -570,7 +579,14 @@ static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) { bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
holder_ = Handle<JSObject>::null(); if (check_type_ == RECEIVER_MAP_CHECK) {
// For primitive checks the holder is set up to point to the
// corresponding prototype object, i.e. one step of the algorithm
// below has been already performed.
// For non-primitive checks we clear it to allow computing targets
// for polymorphic calls.
holder_ = Handle<JSObject>::null();
}
while (true) { while (true) {
LookupResult lookup; LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup); type->LookupInDescriptors(NULL, *name, &lookup);
@ -640,27 +656,20 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
map = receiver_types_->at(0); map = receiver_types_->at(0);
} else { } else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK); ASSERT(check_type_ != RECEIVER_MAP_CHECK);
map = Handle<Map>( holder_ = Handle<JSObject>(
oracle->GetPrototypeForPrimitiveCheck(check_type_)->map()); oracle->GetPrototypeForPrimitiveCheck(check_type_));
map = Handle<Map>(holder_->map());
} }
is_monomorphic_ = ComputeTarget(map, name); is_monomorphic_ = ComputeTarget(map, name);
} }
} }
void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
is_smi_only_ = left.IsSmi() && right.IsSmi();
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT); TypeInfo info = oracle->CompareType(this);
TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT); if (info.IsSmi()) {
if (left.IsSmi() && right.IsSmi()) {
compare_type_ = SMI_ONLY; compare_type_ = SMI_ONLY;
} else if (left.IsNonPrimitive() && right.IsNonPrimitive()) { } else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY; compare_type_ = OBJECT_ONLY;
} else { } else {
ASSERT(compare_type_ == NONE); ASSERT(compare_type_ == NONE);

20
deps/v8/src/ast.h

@@ -1205,9 +1205,10 @@ class Property: public Expression {
         key_(key),
         pos_(pos),
         type_(type),
-        is_monomorphic_(false),
         receiver_types_(NULL),
+        is_monomorphic_(false),
         is_array_length_(false),
+        is_string_length_(false),
         is_function_prototype_(false),
         is_arguments_access_(false) { }
@@ -1221,6 +1222,7 @@ class Property: public Expression {
   int position() const { return pos_; }
   bool is_synthetic() const { return type_ == SYNTHETIC; }
 
+  bool IsStringLength() const { return is_string_length_; }
   bool IsFunctionPrototype() const { return is_function_prototype_; }
 
   // Marks that this is actually an argument rewritten to a keyed property
@@ -1249,11 +1251,12 @@ class Property: public Expression {
   int pos_;
   Type type_;
 
-  bool is_monomorphic_;
   ZoneMapList* receiver_types_;
-  bool is_array_length_;
-  bool is_function_prototype_;
-  bool is_arguments_access_;
+  bool is_monomorphic_ : 1;
+  bool is_array_length_ : 1;
+  bool is_string_length_ : 1;
+  bool is_function_prototype_ : 1;
+  bool is_arguments_access_ : 1;
   Handle<Map> monomorphic_receiver_type_;
 
   // Dummy property used during preparsing.
@@ -1395,7 +1398,7 @@ class BinaryOperation: public Expression {
                   Expression* left,
                   Expression* right,
                   int pos)
-      : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
+      : op_(op), left_(left), right_(right), pos_(pos) {
     ASSERT(Token::IsBinaryOp(op));
     right_id_ = (op == Token::AND || op == Token::OR)
         ? static_cast<int>(GetNextId())
@@ -1416,10 +1419,6 @@ class BinaryOperation: public Expression {
   Expression* right() const { return right_; }
   int position() const { return pos_; }
 
-  // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  bool IsSmiOnly() const { return is_smi_only_; }
-
   // Bailout support.
   int RightId() const { return right_id_; }
 
@@ -1428,7 +1427,6 @@ class BinaryOperation: public Expression {
   Expression* left_;
   Expression* right_;
   int pos_;
-  bool is_smi_only_;
 
   // The short-circuit logical operations have an AST ID for their
   // right-hand subexpression.
   int right_id_;
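The Property hunk above repacks five bool flags as one-bit bitfields. A standalone illustration of the space effect; exact sizes are implementation-defined, so the static_assert states only the safe direction:

    struct PackedFlags {   // mirrors the new layout: five one-bit fields
      bool is_monomorphic : 1;
      bool is_array_length : 1;
      bool is_string_length : 1;
      bool is_function_prototype : 1;
      bool is_arguments_access : 1;
    };

    struct PlainFlags {    // mirrors the old layout: five full bools
      bool is_monomorphic;
      bool is_array_length;
      bool is_string_length;
      bool is_function_prototype;
      bool is_arguments_access;
    };

    static_assert(sizeof(PackedFlags) <= sizeof(PlainFlags),
                  "bitfields never take more space here");

    int main() { return 0; }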

3
deps/v8/src/bootstrapper.cc

@@ -1805,9 +1805,8 @@ Genesis::Genesis(Handle<Object> global_object,
     AddToWeakGlobalContextList(*global_context_);
     Top::set_context(*global_context_);
     i::Counters::contexts_created_by_snapshot.Increment();
-    result_ = global_context_;
     JSFunction* empty_function =
-        JSFunction::cast(result_->function_map()->prototype());
+        JSFunction::cast(global_context_->function_map()->prototype());
     empty_function_ = Handle<JSFunction>(empty_function);
     Handle<GlobalObject> inner_global;
     Handle<JSGlobalProxy> global_proxy =

7
deps/v8/src/builtins.cc

@@ -1228,7 +1228,12 @@ static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
 
 static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
-  LoadIC::GenerateStringLength(masm);
+  LoadIC::GenerateStringLength(masm, false);
+}
+
+
+static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
+  LoadIC::GenerateStringLength(masm, true);
 }
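The change above threads a bool through one shared generator instead of duplicating the stub body. A self-contained sketch of the same pattern; the printf bodies stand in for the real code generation and are assumptions, not V8 code:

    #include <cstdio>

    // Stand-in for LoadIC::GenerateStringLength(masm, support_wrappers):
    // one implementation, parameterized on whether String wrappers unwrap.
    void GenerateStringLength(bool support_wrappers) {
      if (support_wrappers) {
        std::printf("also unwrap String wrapper objects\n");
      }
      std::printf("load and return the string length\n");
    }

    int main() {
      GenerateStringLength(false);  // LoadIC_StringLength
      GenerateStringLength(true);   // LoadIC_StringWrapperLength
      return 0;
    }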

1
deps/v8/src/builtins.h

@@ -86,6 +86,7 @@ enum BuiltinExtraArguments {
   V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
   V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \
   V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC) \
+  V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC) \
   V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \
   V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \
   \

16
deps/v8/src/code-stubs.h

@@ -273,21 +273,20 @@ class FastNewClosureStub : public CodeStub {
 class FastNewContextStub : public CodeStub {
  public:
-  // We want no more than 64 different stubs.
-  static const int kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63;
+  static const int kMaximumSlots = 64;
 
   explicit FastNewContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ >= Context::MIN_CONTEXT_SLOTS && slots_ <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots <= kMaximumSlots);
   }
 
   void Generate(MacroAssembler* masm);
 
  private:
-  int slots_;
+  virtual const char* GetName() { return "FastNewContextStub"; }
+  virtual Major MajorKey() { return FastNewContext; }
+  virtual int MinorKey() { return slots_; }
 
-  const char* GetName() { return "FastNewContextStub"; }
-  Major MajorKey() { return FastNewContext; }
-  int MinorKey() { return slots_; }
+  int slots_;
 };
@@ -600,8 +599,7 @@ class CEntryStub : public CodeStub {
                     Label* throw_termination_exception,
                     Label* throw_out_of_memory_exception,
                     bool do_gc,
-                    bool always_allocate_scope,
-                    int alignment_skew = 0);
+                    bool always_allocate_scope);
   void GenerateThrowTOS(MacroAssembler* masm);
   void GenerateThrowUncatchable(MacroAssembler* masm,
                                 UncatchableExceptionType type);
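FastNewContextStub's MinorKey() is just slots_, so kMaximumSlots bounds how many distinct specialized stubs can exist. A quick check that the old and new bounds describe the same 64 stub instances, under the assumption that Context::MIN_CONTEXT_SLOTS is 5 in this V8 vintage:

    #include <cassert>

    int main() {
      const int kMinContextSlots = 5;                      // assumed value
      const int kOldMaximumSlots = kMinContextSlots + 63;  // old: total slots
      const int kNewMaximumSlots = 64;                     // new: payload slots
      // Old keys ranged over [kMinContextSlots, kOldMaximumSlots],
      // new keys over (0, kNewMaximumSlots]; both are 64 values.
      assert(kOldMaximumSlots - kMinContextSlots + 1 == 64);
      assert(kNewMaximumSlots == 64);
      return 0;
    }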

19
deps/v8/src/deoptimizer.h

@@ -128,14 +128,17 @@ class Deoptimizer : public Malloced {
   static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
 
-  // Given the relocation info of a call to the stack check stub, patch the
-  // code so as to go unconditionally to the on-stack replacement builtin
-  // instead.
-  static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
+  // Patch all stack guard checks in the unoptimized code to
+  // unconditionally call replacement_code.
+  static void PatchStackCheckCode(Code* unoptimized_code,
+                                  Code* check_code,
+                                  Code* replacement_code);
 
-  // Given the relocation info of a call to the on-stack replacement
-  // builtin, patch the code back to the original stack check code.
-  static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
+  // Change all patched stack guard checks in the unoptimized code
+  // back to a normal stack guard check.
+  static void RevertStackCheckCode(Code* unoptimized_code,
+                                   Code* check_code,
+                                   Code* replacement_code);
 
   ~Deoptimizer();
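The interface change above moves the iteration over call sites into the Deoptimizer itself: callers hand over whole Code objects and the patcher finds every stack-check call, skipping the first (the function-entry check). A standalone analogue of that scan-and-redirect loop over a raw byte buffer; this is not V8 code, and the rel32 matching is deliberately simplified:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Redirect every relative call to old_target toward new_target,
    // skipping the first match, as PatchStackCheckCode skips the
    // function-entry stack check.
    void PatchCalls(std::vector<uint8_t>& code,
                    int32_t old_target, int32_t new_target) {
      bool first = true;
      for (size_t pc = 0; pc + 5 <= code.size(); ++pc) {
        if (code[pc] != 0xe8) continue;  // 0xe8 = x86 call rel32
        int32_t rel;
        std::memcpy(&rel, &code[pc + 1], 4);
        if (rel != old_target) continue;
        if (first) { first = false; continue; }
        std::memcpy(&code[pc + 1], &new_target, 4);
      }
    }

    int main() {
      std::vector<uint8_t> code;
      for (int i = 0; i < 2; ++i) {       // two calls to "target 7"
        code.push_back(0xe8);
        int32_t rel = 7;
        const uint8_t* p = reinterpret_cast<const uint8_t*>(&rel);
        code.insert(code.end(), p, p + 4);
      }
      PatchCalls(code, 7, 42);            // only the second is redirected
      return 0;
    }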

6
deps/v8/src/frames.cc

@@ -695,7 +695,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
   ASSERT(frames->length() == 0);
   ASSERT(is_optimized());
 
-  int deopt_index = AstNode::kNoNumber;
+  int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
 
   // BUG(3243555): Since we don't have a lazy-deopt registered at
@@ -793,7 +793,7 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
   SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
   *deopt_index = safepoint_entry.deoptimization_index();
-  ASSERT(*deopt_index != AstNode::kNoNumber);
+  ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
 
   return DeoptimizationInputData::cast(code->deoptimization_data());
 }
@@ -803,7 +803,7 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
   ASSERT(functions->length() == 0);
   ASSERT(is_optimized());
 
-  int deopt_index = AstNode::kNoNumber;
+  int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
   TranslationIterator it(data->TranslationByteArray(),

6
deps/v8/src/heap.cc

@@ -35,6 +35,7 @@
 #include "debug.h"
 #include "heap-profiler.h"
 #include "global-handles.h"
+#include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
@@ -400,6 +401,8 @@ void Heap::GarbageCollectionPrologue() {
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   ReportStatisticsBeforeGC();
 #endif
+
+  LiveObjectList::GCPrologue();
 }
@@ -412,6 +415,7 @@ intptr_t Heap::SizeOfObjects() {
 }
 
 void Heap::GarbageCollectionEpilogue() {
+  LiveObjectList::GCEpilogue();
 #ifdef DEBUG
   allow_allocation(true);
   ZapFromSpace();
@@ -1066,6 +1070,8 @@ void Heap::Scavenge() {
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
+  LiveObjectList::UpdateReferencesForScavengeGC();
+
   ASSERT(new_space_front == new_space_.top());
 
   // Set age mark.
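The new GCPrologue/GCEpilogue calls bracket every collection so the live-object-list bookkeeping never observes a half-updated heap, and the scavenge pass re-points its references after objects move. The generic shape of that hook pattern, sketched standalone with hypothetical names (not the V8 classes):

    #include <cstdio>

    // A tracking subsystem gets a callback before and after each
    // collection so its bookkeeping stays consistent with the heap.
    struct LiveObjectTracker {
      static void GCPrologue() { std::printf("freeze bookkeeping\n"); }
      static void GCEpilogue() { std::printf("re-sync bookkeeping\n"); }
    };

    void CollectGarbage() {
      LiveObjectTracker::GCPrologue();
      // ... mark, scavenge/sweep, compact ...
      LiveObjectTracker::GCEpilogue();
    }

    int main() {
      CollectGarbage();
      return 0;
    }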

31
deps/v8/src/hydrogen-instructions.cc

@@ -490,7 +490,7 @@ void HInstruction::InsertAfter(HInstruction* previous) {
 #ifdef DEBUG
-void HInstruction::Verify() const {
+void HInstruction::Verify() {
   // Verify that input operands are defined before use.
   HBasicBlock* cur_block = block();
   for (int i = 0; i < OperandCount(); ++i) {
@@ -517,6 +517,11 @@ void HInstruction::Verify() const {
   if (HasSideEffects() && !IsOsrEntry()) {
     ASSERT(next()->IsSimulate());
   }
+
+  // Verify that instructions that can be eliminated by GVN have overridden
+  // HValue::DataEquals.  The default implementation is UNREACHABLE.  We
+  // don't actually care whether DataEquals returns true or false here.
+  if (CheckFlag(kUseGVN)) DataEquals(this);
 }
 #endif
@@ -524,7 +529,7 @@ void HInstruction::Verify() const {
 HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
   for (int i = 0; i < count; ++i) arguments_[i] = NULL;
   set_representation(Representation::Tagged());
-  SetFlagMask(AllSideEffects());
+  SetAllSideEffects();
 }
@@ -1119,10 +1124,10 @@ void HCompare::PrintDataTo(StringStream* stream) const {
 void HCompare::SetInputRepresentation(Representation r) {
   input_representation_ = r;
   if (r.IsTagged()) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
     ClearFlag(kUseGVN);
   } else {
-    ClearFlagMask(AllSideEffects());
+    ClearAllSideEffects();
     SetFlag(kUseGVN);
   }
 }
@@ -1388,7 +1393,7 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
 
-void HPhi::Verify() const {
+void HPhi::Verify() {
   ASSERT(OperandCount() == block()->predecessors()->length());
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* value = OperandAt(i);
@@ -1400,49 +1405,49 @@ void HPhi::Verify() const {
 }
 
-void HSimulate::Verify() const {
+void HSimulate::Verify() {
   HInstruction::Verify();
   ASSERT(HasAstId());
 }
 
-void HBoundsCheck::Verify() const {
+void HBoundsCheck::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
-void HCheckSmi::Verify() const {
+void HCheckSmi::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
-void HCheckNonSmi::Verify() const {
+void HCheckNonSmi::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
-void HCheckInstanceType::Verify() const {
+void HCheckInstanceType::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
-void HCheckMap::Verify() const {
+void HCheckMap::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
-void HCheckFunction::Verify() const {
+void HCheckFunction::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
 
-void HCheckPrototypeMaps::Verify() const {
+void HCheckPrototypeMaps::Verify() {
   HInstruction::Verify();
   ASSERT(HasNoUses());
 }
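The new Verify() check enforces a GVN contract: any instruction flagged kUseGVN must override DataEquals, because value numbering compares the non-operand payload through it and the new default implementation deliberately hits UNREACHABLE. A toy restatement of the contract (not V8 code):

    #include <cassert>

    // GVN may merge two instructions only when opcodes match, operands
    // match (elided here), and DataEquals agrees on the extra payload.
    // A kUseGVN instruction that forgot to override DataEquals trips the
    // assert in the base class, mirroring the new default.
    struct Insn {
      int opcode;
      virtual bool DataEquals(const Insn* /*other*/) const {
        assert(false);  // stands in for UNREACHABLE()
        return false;
      }
      virtual ~Insn() {}
    };

    struct AddInsn : public Insn {
      virtual bool DataEquals(const Insn* /*other*/) const { return true; }
    };

    bool CanMerge(const Insn* a, const Insn* b) {
      return a->opcode == b->opcode && a->DataEquals(b);
    }

    int main() {
      AddInsn x, y;
      x.opcode = y.opcode = 1;
      assert(CanMerge(&x, &y));
      return 0;
    }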

310
deps/v8/src/hydrogen-instructions.h

@@ -46,112 +46,6 @@ class LInstruction;
 class LChunkBuilder;
 
-// Type hierarchy:
-//
-// HValue
-//   HInstruction
-//     HAccessArgumentsAt
-//     HApplyArguments
-//     HArgumentsElements
-//     HArgumentsLength
-//     HArgumentsObject
-//     HBinaryOperation
-//       HArithmeticBinaryOperation
-//         HAdd
-//         HDiv
-//         HMod
-//         HMul
-//         HSub
-//       HBitwiseBinaryOperation
-//         HBitAnd
-//         HBitOr
-//         HBitXor
-//         HSar
-//         HShl
-//         HShr
-//       HBoundsCheck
-//       HCompare
-//       HCompareJSObjectEq
-//       HInstanceOf
-//       HInstanceOfKnownGlobal
-//       HLoadKeyed
-//         HLoadKeyedFastElement
-//         HLoadKeyedGeneric
-//       HPower
-//       HStoreNamed
-//         HStoreNamedField
-//         HStoreNamedGeneric
-//       HStringCharCodeAt
-//     HBlockEntry
-//     HCall
-//       HCallConstantFunction
-//       HCallFunction
-//       HCallGlobal
-//       HCallKeyed
-//       HCallKnownGlobal
-//       HCallNamed
-//       HCallNew
-//       HCallRuntime
-//       HCallStub
-//     HCheckPrototypeMaps
-//     HConstant
-//     HControlInstruction
-//       HDeoptimize
-//       HGoto
-//       HUnaryControlInstruction
-//         HCompareMap
-//         HReturn
-//         HTest
-//         HThrow
-//     HEnterInlined
-//     HFunctionLiteral
-//     HGlobalObject
-//     HGlobalReceiver
-//     HLeaveInlined
-//     HLoadContextSlot
-//     HLoadGlobal
-//     HMaterializedLiteral
-//       HArrayLiteral
-//       HObjectLiteral
-//       HRegExpLiteral
-//     HOsrEntry
-//     HParameter
-//     HSimulate
-//     HStackCheck
-//     HStoreKeyed
-//       HStoreKeyedFastElement
-//       HStoreKeyedGeneric
-//     HUnaryOperation
-//       HBitNot
-//       HChange
-//       HCheckFunction
-//       HCheckInstanceType
-//       HCheckMap
-//       HCheckNonSmi
-//       HCheckSmi
-//       HDeleteProperty
-//       HFixedArrayLength
-//       HJSArrayLength
-//       HLoadElements
-//       HTypeofIs
-//       HLoadNamedField
-//       HLoadNamedGeneric
-//       HLoadFunctionPrototype
-//       HPushArgument
-//       HStringLength
-//       HTypeof
-//       HUnaryMathOperation
-//       HUnaryPredicate
-//         HClassOfTest
-//         HHasCachedArrayIndex
-//         HHasInstanceType
-//         HIsNull
-//         HIsObject
-//         HIsSmi
-//         HValueOf
-//     HUnknownOSRValue
-//   HPhi
-
 #define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
   V(ArithmeticBinaryOperation) \
   V(BinaryOperation) \
@@ -224,12 +118,12 @@ class LChunkBuilder;
   V(LeaveInlined) \
   V(LoadContextSlot) \
   V(LoadElements) \
+  V(LoadFunctionPrototype) \
   V(LoadGlobal) \
   V(LoadKeyedFastElement) \
   V(LoadKeyedGeneric) \
   V(LoadNamedField) \
   V(LoadNamedGeneric) \
-  V(LoadFunctionPrototype) \
   V(Mod) \
   V(Mul) \
   V(ObjectLiteral) \
@@ -268,7 +162,6 @@ class LChunkBuilder;
   V(GlobalVars) \
   V(Maps) \
   V(ArrayLengths) \
-  V(FunctionPrototypes) \
   V(OsrEntries)
 
 #define DECLARE_INSTRUCTION(type) \
@@ -573,11 +466,6 @@ class HValue: public ZoneObject {
     return flags << kChangesToDependsFlagsLeftShift;
   }
 
-  // A flag mask to mark an instruction as having arbitrary side effects.
-  static int AllSideEffects() {
-    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
-  }
-
   static HValue* cast(HValue* value) { return value; }
 
   enum Opcode {
@@ -636,9 +524,6 @@ class HValue: public ZoneObject {
     return NULL;
   }
 
-  bool HasSideEffects() const {
-    return (flags_ & AllSideEffects()) != 0;
-  }
   bool IsDefinedAfter(HBasicBlock* other) const;
 
   // Operands.
@@ -661,12 +546,13 @@ class HValue: public ZoneObject {
   void Delete();
 
   int flags() const { return flags_; }
-  void SetFlagMask(int mask) { flags_ |= mask; }
-  void SetFlag(Flag f) { SetFlagMask(1 << f); }
-  void ClearFlagMask(int mask) { flags_ &= ~mask; }
-  void ClearFlag(Flag f) { ClearFlagMask(1 << f); }
-  bool CheckFlag(Flag f) const { return CheckFlagMask(1 << f); }
-  bool CheckFlagMask(int mask) const { return (flags_ & mask) != 0; }
+  void SetFlag(Flag f) { flags_ |= (1 << f); }
+  void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
+  bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+
+  void SetAllSideEffects() { flags_ |= AllSideEffects(); }
+  void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
+  bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
 
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
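The flag-helper rewrite above replaces the raw mask setters with named per-flag operations plus dedicated side-effect helpers, and pushes the AllSideEffects mask into the private section. The same shape in a self-contained toy; the flag names are placeholders, not the real HValue flag set:

    #include <cassert>

    enum Flag { kUseGVN, kChangesGlobalVars, kChangesMaps };

    class Flags {
     public:
      Flags() : bits_(0) {}
      void SetFlag(Flag f) { bits_ |= (1 << f); }
      void ClearFlag(Flag f) { bits_ &= ~(1 << f); }
      bool CheckFlag(Flag f) const { return (bits_ & (1 << f)) != 0; }

      // Only these named helpers touch the side-effect mask, as in the
      // patched HValue.
      void SetAllSideEffects() { bits_ |= AllSideEffects(); }
      void ClearAllSideEffects() { bits_ &= ~AllSideEffects(); }
      bool HasSideEffects() const { return (bits_ & AllSideEffects()) != 0; }

     private:
      static int AllSideEffects() {
        return (1 << kChangesGlobalVars) | (1 << kChangesMaps);
      }
      int bits_;
    };

    int main() {
      Flags f;
      f.SetAllSideEffects();
      assert(f.HasSideEffects() && !f.CheckFlag(kUseGVN));
      f.ClearAllSideEffects();
      assert(!f.HasSideEffects());
      return 0;
    }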
@@ -714,11 +600,16 @@ class HValue: public ZoneObject {
   void InsertInputConversion(HInstruction* previous, int index, HType type);
 
 #ifdef DEBUG
-  virtual void Verify() const = 0;
+  virtual void Verify() = 0;
 #endif
 
  protected:
-  virtual bool DataEquals(HValue* other) const { return true; }
+  // This function must be overridden for instructions with flag kUseGVN, to
+  // compare the non-Operand parts of the instruction.
+  virtual bool DataEquals(HValue* other) const {
+    UNREACHABLE();
+    return false;
+  }
   virtual void RepresentationChanged(Representation to) { }
   virtual Range* InferRange();
   virtual void DeleteFromGraph() = 0;
@@ -735,6 +626,11 @@ class HValue: public ZoneObject {
   }
 
  private:
+  // A flag mask to mark an instruction as having arbitrary side effects.
+  static int AllSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+  }
+
   void InternalReplaceAtUse(HValue* use, HValue* other);
   void RegisterUse(int index, HValue* new_value);
@@ -774,7 +670,7 @@ class HInstruction: public HValue {
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   // Returns whether this is some kind of deoptimizing check
@@ -1063,7 +959,7 @@ class HSimulate: public HInstruction {
   DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
  protected:
@@ -1159,6 +1055,9 @@ class HGlobalObject: public HInstruction {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1171,6 +1070,9 @@ class HGlobalReceiver: public HInstruction {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1361,6 +1263,9 @@ class HJSArrayLength: public HUnaryOperation {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1377,6 +1282,9 @@ class HFixedArrayLength: public HUnaryOperation {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1394,6 +1302,9 @@ class HBitNot: public HUnaryOperation {
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1489,6 +1400,9 @@ class HLoadElements: public HUnaryOperation {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1510,7 +1424,7 @@ class HCheckMap: public HUnaryOperation {
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   Handle<Map> map() const { return map_; }
@@ -1545,7 +1459,7 @@ class HCheckFunction: public HUnaryOperation {
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   Handle<JSFunction> target() const { return target_; }
@@ -1587,7 +1501,7 @@ class HCheckInstanceType: public HUnaryOperation {
   }
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
@@ -1628,10 +1542,13 @@ class HCheckNonSmi: public HUnaryOperation {
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1646,7 +1563,7 @@ class HCheckPrototypeMaps: public HInstruction {
   virtual bool IsCheckInstruction() const { return true; }
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   Handle<JSObject> prototype() const { return prototype_; }
@@ -1689,10 +1606,13 @@ class HCheckSmi: public HUnaryOperation {
   virtual HType CalculateInferredType() const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1745,7 +1665,7 @@ class HPhi: public HValue {
   virtual void PrintTo(StringStream* stream) const;
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   DECLARE_INSTRUCTION(Phi)
@@ -1833,7 +1753,7 @@ class HConstant: public HInstruction {
   }
 
 #ifdef DEBUG
-  virtual void Verify() const { }
+  virtual void Verify() { }
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
@@ -1952,6 +1872,9 @@ class HArgumentsElements: public HInstruction {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1963,6 +1886,9 @@ class HArgumentsLength: public HUnaryOperation {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -1999,6 +1925,8 @@ class HAccessArgumentsAt: public HInstruction {
     operands_[index] = value;
   }
 
+  virtual bool DataEquals(HValue* other) const { return true; }
+
  private:
   HOperandVector<3> operands_;
 };
@@ -2018,13 +1946,16 @@ class HBoundsCheck: public HBinaryOperation {
   }
 
 #ifdef DEBUG
-  virtual void Verify() const;
+  virtual void Verify();
 #endif
 
   HValue* index() const { return left(); }
   HValue* length() const { return right(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2034,7 +1965,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
       : HBinaryOperation(left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {
@@ -2044,7 +1975,7 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
   virtual void RepresentationChanged(Representation to) {
     if (!to.IsTagged()) {
       ASSERT(to.IsInteger32());
-      ClearFlagMask(AllSideEffects());
+      ClearAllSideEffects();
       SetFlag(kTruncatingToInt32);
       SetFlag(kUseGVN);
     }
@@ -2062,12 +1993,12 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
       : HBinaryOperation(left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kFlexibleRepresentation);
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual void RepresentationChanged(Representation to) {
     if (!to.IsTagged()) {
-      ClearFlagMask(AllSideEffects());
+      ClearAllSideEffects();
       SetFlag(kUseGVN);
     }
   }
@@ -2093,7 +2024,7 @@ class HCompare: public HBinaryOperation {
       : HBinaryOperation(left, right), token_(token) {
     ASSERT(Token::IsCompareOp(token));
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   void SetInputRepresentation(Representation r);
@@ -2142,6 +2073,9 @@ class HCompareJSObjectEq: public HBinaryOperation {
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2184,6 +2118,9 @@ class HIsObject: public HUnaryPredicate {
   explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2192,6 +2129,9 @@ class HIsSmi: public HUnaryPredicate {
   explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2228,6 +2168,9 @@ class HHasCachedArrayIndex: public HUnaryPredicate {
   explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
 
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2278,7 +2221,7 @@ class HInstanceOf: public HBinaryOperation {
  public:
   HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual bool EmitAtUses() const { return uses()->length() <= 1; }
@@ -2296,7 +2239,7 @@ class HInstanceOfKnownGlobal: public HUnaryOperation {
   HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
       : HUnaryOperation(left), function_(right) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   Handle<JSFunction> function() { return function_; }
@@ -2326,6 +2269,9 @@ class HPower: public HBinaryOperation {
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2348,6 +2294,8 @@ class HAdd: public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Add, "add")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2363,6 +2311,8 @@ class HSub: public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2383,6 +2333,8 @@ class HMul: public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2398,6 +2350,8 @@ class HMod: public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2414,6 +2368,8 @@ class HDiv: public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Div, "div")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2429,6 +2385,8 @@ class HBitAnd: public HBitwiseBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2442,6 +2400,9 @@ class HBitXor: public HBitwiseBinaryOperation {
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2456,6 +2417,8 @@ class HBitOr: public HBitwiseBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange();
 };
@@ -2469,6 +2432,9 @@ class HShl: public HBitwiseBinaryOperation {
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2480,6 +2446,9 @@ class HShr: public HBitwiseBinaryOperation {
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2492,6 +2461,9 @@ class HSar: public HBitwiseBinaryOperation {
   virtual HType CalculateInferredType() const;
 
   DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
@@ -2534,7 +2506,7 @@ class HCallStub: public HInstruction {
         argument_count_(argument_count),
         transcendental_type_(TranscendentalCache::kNumberOfCaches) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   CodeStub::Major major_key() { return major_key_; }
@@ -2603,12 +2575,17 @@ class HLoadGlobal: public HInstruction {
 class HStoreGlobal: public HUnaryOperation {
  public:
-  HStoreGlobal(HValue* value, Handle<JSGlobalPropertyCell> cell)
-      : HUnaryOperation(value), cell_(cell) {
+  HStoreGlobal(HValue* value,
+               Handle<JSGlobalPropertyCell> cell,
+               bool check_hole_value)
+      : HUnaryOperation(value),
+        cell_(cell),
+        check_hole_value_(check_hole_value) {
     SetFlag(kChangesGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+  bool check_hole_value() const { return check_hole_value_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -2617,14 +2594,9 @@ class HStoreGlobal: public HUnaryOperation {
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
 
- protected:
-  virtual bool DataEquals(HValue* other) const {
-    HStoreGlobal* b = HStoreGlobal::cast(other);
-    return cell_.is_identical_to(b->cell());
-  }
-
  private:
   Handle<JSGlobalPropertyCell> cell_;
+  bool check_hole_value_;
 };
@@ -2704,7 +2676,7 @@ class HLoadNamedGeneric: public HUnaryOperation {
   HLoadNamedGeneric(HValue* object, Handle<Object> name)
       : HUnaryOperation(object), name_(name) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   HValue* object() const { return OperandAt(0); }
@@ -2716,12 +2688,6 @@ class HLoadNamedGeneric: public HUnaryOperation {
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
 
- protected:
-  virtual bool DataEquals(HValue* other) const {
-    HLoadNamedGeneric* b = HLoadNamedGeneric::cast(other);
-    return name_.is_identical_to(b->name_);
-  }
-
  private:
   Handle<Object> name_;
 };
@@ -2732,7 +2698,8 @@ class HLoadFunctionPrototype: public HUnaryOperation {
   explicit HLoadFunctionPrototype(HValue* function)
       : HUnaryOperation(function) {
     set_representation(Representation::Tagged());
-    SetFlagMask(kDependsOnFunctionPrototypes);
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
   }
 
   HValue* function() const { return OperandAt(0); }
@@ -2781,13 +2748,16 @@ class HLoadKeyedFastElement: public HLoadKeyed {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
                                "load_keyed_fast_element")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
 };
 
 
 class HLoadKeyedGeneric: public HLoadKeyed {
  public:
   HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
@@ -2823,12 +2793,6 @@ class HStoreNamed: public HBinaryOperation {
   DECLARE_INSTRUCTION(StoreNamed)
 
- protected:
-  virtual bool DataEquals(HValue* other) const {
-    HStoreNamed* b = HStoreNamed::cast(other);
-    return name_.is_identical_to(b->name_);
-  }
-
  private:
   Handle<Object> name_;
 };
@@ -2874,7 +2838,7 @@ class HStoreNamedGeneric: public HStoreNamed {
  public:
   HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
       : HStoreNamed(obj, name, val) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
@@ -2939,7 +2903,7 @@ class HStoreKeyedGeneric: public HStoreKeyed {
  public:
   HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
       : HStoreKeyed(obj, key, val) {
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
@@ -2960,14 +2924,14 @@ class HStringCharCodeAt: public HBinaryOperation {
         : Representation::Tagged();
   }
 
-  virtual bool DataEquals(HValue* other) const { return true; }
-
   HValue* string() const { return OperandAt(0); }
   HValue* index() const { return OperandAt(1); }
 
   DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange() {
     return new Range(0, String::kMaxUC16CharCode);
   }
@@ -2990,11 +2954,11 @@ class HStringLength: public HUnaryOperation {
     return HType::Smi();
   }
 
-  virtual bool DataEquals(HValue* other) const { return true; }
-
   DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
 
  protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+
   virtual Range* InferRange() {
     return new Range(0, String::kMaxLength);
   }
@@ -3128,7 +3092,7 @@ class HDeleteProperty: public HBinaryOperation {
   HDeleteProperty(HValue* obj, HValue* key)
       : HBinaryOperation(obj, key) {
     set_representation(Representation::Tagged());
-    SetFlagMask(AllSideEffects());
+    SetAllSideEffects();
   }
 
   virtual Representation RequiredInputRepresentation(int index) const {

46
deps/v8/src/hydrogen.cc

@@ -684,7 +684,7 @@ HGraph::HGraph(CompilationInfo* info)
 }
 
-bool HGraph::AllowAggressiveOptimizations() const {
+bool HGraph::AllowCodeMotion() const {
   return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
 }
@@ -1446,19 +1446,23 @@ void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
   }
 }
 
-// Only move instructions that postdominate the loop header (i.e. are
-// always executed inside the loop). This is to avoid unnecessary
-// deoptimizations assuming the loop is executed at least once.
-// TODO(fschneider): Better type feedback should give us information
-// about code that was never executed.
 bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
                                       HBasicBlock* loop_header) {
-  if (FLAG_aggressive_loop_invariant_motion &&
-      !instr->IsChange() &&
-      (!instr->IsCheckInstruction() ||
-       graph_->AllowAggressiveOptimizations())) {
+  // If we've disabled code motion, don't move any instructions.
+  if (!graph_->AllowCodeMotion()) return false;
+
+  // If --aggressive-loop-invariant-motion, move everything except change
+  // instructions.
+  if (FLAG_aggressive_loop_invariant_motion && !instr->IsChange()) {
     return true;
   }
+
+  // Otherwise only move instructions that postdominate the loop header
+  // (i.e. are always executed inside the loop). This is to avoid
+  // unnecessary deoptimizations assuming the loop is executed at least
+  // once. TODO(fschneider): Better type feedback should give us
+  // information about code that was never executed.
   HBasicBlock* block = instr->block();
   bool result = true;
   if (block != loop_header) {
@@ -3366,9 +3370,10 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
   LookupGlobalPropertyCell(var, &lookup, true);
   CHECK_BAILOUT;
 
+  bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
   Handle<GlobalObject> global(graph()->info()->global_object());
   Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-  HInstruction* instr = new HStoreGlobal(value, cell);
+  HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
   instr->set_position(position);
   AddInstruction(instr);
   if (instr->HasSideEffects()) AddSimulate(ast_id);
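The check_hole computation above decides whether HStoreGlobal may write the cell blindly: only a DontDelete, writable global can never contain the hole sentinel left behind by a deletion. Restated as a standalone predicate with the three interesting cases:

    #include <cassert>

    bool NeedsHoleCheck(bool is_dont_delete, bool is_read_only) {
      return !is_dont_delete || is_read_only;
    }

    int main() {
      assert(!NeedsHoleCheck(true, false));  // var-declared global: store directly
      assert(NeedsHoleCheck(false, false));  // deletable global: cell may hold the hole
      assert(NeedsHoleCheck(true, true));    // read-only: never store blindly
      return 0;
    }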
@@ -3385,7 +3390,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
   // We have a second position recorded in the FullCodeGenerator to have
   // type feedback for the binary operation.
   BinaryOperation* operation = expr->binary_operation();
-  operation->RecordTypeFeedback(oracle());
 
   if (var != NULL) {
     if (!var->is_global() && !var->IsStackAllocated()) {
@@ -3766,6 +3770,14 @@ void HGraphBuilder::VisitProperty(Property* expr) {
     AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
     instr = new HJSArrayLength(array);
 
+  } else if (expr->IsStringLength()) {
+    HValue* string = Pop();
+    AddInstruction(new HCheckNonSmi(string));
+    AddInstruction(new HCheckInstanceType(string,
+                                          FIRST_STRING_TYPE,
+                                          LAST_STRING_TYPE));
+    instr = new HStringLength(string);
+
   } else if (expr->IsFunctionPrototype()) {
     HValue* function = Pop();
     AddInstruction(new HCheckNonSmi(function));
@@ -3952,8 +3964,7 @@ bool HGraphBuilder::TryInline(Call* expr) {
   int count_before = AstNode::Count();
 
   // Parse and allocate variables.
-  Handle<SharedFunctionInfo> shared(target->shared());
-  CompilationInfo inner_info(shared);
+  CompilationInfo inner_info(target);
   if (!ParserApi::Parse(&inner_info) ||
       !Scope::Analyze(&inner_info)) {
     return false;
@@ -3976,9 +3987,10 @@ bool HGraphBuilder::TryInline(Call* expr) {
   // Don't inline functions that uses the arguments object or that
   // have a mismatching number of parameters.
+  Handle<SharedFunctionInfo> shared(target->shared());
   int arity = expr->arguments()->length();
   if (function->scope()->arguments() != NULL ||
-      arity != target->shared()->formal_parameter_count()) {
+      arity != shared->formal_parameter_count()) {
     return false;
   }
@@ -4801,7 +4813,7 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
     default:
       UNREACHABLE();
   }
-  TypeInfo info = oracle()->BinaryType(expr, TypeFeedbackOracle::RESULT);
+  TypeInfo info = oracle()->BinaryType(expr);
   // If we hit an uninitialized binary op stub we will get type info
   // for a smi operation. If one of the operands is a constant string
   // do not generate code assuming it is a smi operation.
@@ -4952,7 +4964,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
   HValue* left = Pop();
   Token::Value op = expr->op();
 
-  TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
+  TypeInfo info = oracle()->CompareType(expr);
   HInstruction* instr = NULL;
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not

2
deps/v8/src/hydrogen.h

@@ -297,7 +297,7 @@ class HGraph: public HSubgraph {
   CompilationInfo* info() const { return info_; }
 
-  bool AllowAggressiveOptimizations() const;
+  bool AllowCodeMotion() const;
 
   const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
   const ZoneList<HPhi*>* phi_list() const { return phi_list_; }

111
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,7 +91,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
 void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Try to allocate the context in new space.
   Label gc;
-  __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
+  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                         eax, ebx, ecx, &gc, TAG_OBJECT);
 
   // Get the function from the stack.
@@ -100,7 +101,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Setup the object header.
   __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
   __ mov(FieldOperand(eax, Context::kLengthOffset),
-         Immediate(Smi::FromInt(slots_)));
+         Immediate(Smi::FromInt(length)));
 
   // Setup the fixed slots.
   __ Set(ebx, Immediate(0));  // Set to NULL.
@@ -118,7 +119,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
 
   // Initialize the rest of the slots to undefined.
   __ mov(ebx, Factory::undefined_value());
-  for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
     __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
   }
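With the stub key now counting only payload slots, the generator re-adds the fixed header slots before allocating. The arithmetic, worked standalone under assumed ia32-era constants (MIN_CONTEXT_SLOTS = 5, 4-byte pointers, 8-byte FixedArray header; treat all three as assumptions):

    #include <cassert>

    int main() {
      const int kMinContextSlots = 5;  // assumed Context::MIN_CONTEXT_SLOTS
      const int kPointerSize = 4;      // ia32
      const int kHeaderSize = 8;       // assumed FixedArray header: map + length
      int slots = 3;                   // payload slots from the stub key
      int length = slots + kMinContextSlots;
      int bytes = length * kPointerSize + kHeaderSize;
      assert(length == 8 && bytes == 40);
      return 0;
    }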
@@ -1772,40 +1773,11 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  Label call_runtime;
   ASSERT(operands_type_ == TRBinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
-  // If one of the arguments is a string, call the string add stub.
-  // Otherwise, transition to the generic TRBinaryOpIC type.
-
-  // Registers containing left and right operands respectively.
-  Register left = edx;
-  Register right = eax;
-
-  // Test if left operand is a string.
-  NearLabel left_not_string;
-  __ test(left, Immediate(kSmiTagMask));
-  __ j(zero, &left_not_string);
-  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &left_not_string);
-
-  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_left_stub);
-
-  // Left operand is not a string, test right.
-  __ bind(&left_not_string);
-  __ test(right, Immediate(kSmiTagMask));
-  __ j(zero, &call_runtime);
-  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &call_runtime);
-
-  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_right_stub);
-
-  // Neither argument is a string.
-  __ bind(&call_runtime);
+  // Try to add arguments as strings, otherwise, transition to the generic
+  // TRBinaryOpIC type.
+  GenerateAddStrings(masm);
   GenerateTypeTransition(masm);
 }
@@ -2346,36 +2318,8 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&call_runtime);
   switch (op_) {
     case Token::ADD: {
+      GenerateAddStrings(masm);
       GenerateRegisterArgsPush(masm);
-      // Test for string arguments before calling runtime.
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-      lhs = edx;
-      rhs = eax;
-
-      // Test if left operand is a string.
-      NearLabel lhs_not_string;
-      __ test(lhs, Immediate(kSmiTagMask));
-      __ j(zero, &lhs_not_string);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &lhs_not_string);
-
-      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
-      __ TailCallStub(&string_add_left_stub);
-
-      NearLabel call_add_runtime;
-      // Left operand is not a string, test right.
-      __ bind(&lhs_not_string);
-      __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &call_add_runtime);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &call_add_runtime);
-
-      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
-      __ TailCallStub(&string_add_right_stub);
-
-      // Neither argument is a string.
-      __ bind(&call_add_runtime);
       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
       break;
     }
@@ -2418,6 +2362,40 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
 }
 
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  NearLabel call_runtime;
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  NearLabel left_not_string;
+  __ test(left, Immediate(kSmiTagMask));
+  __ j(zero, &left_not_string);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &left_not_string);
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ test(right, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // Neither argument is a string.
+  __ bind(&call_runtime);
+}
+
 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
     MacroAssembler* masm,
     Label* alloc_failure) {
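GenerateAddStrings is written to fall through when neither operand is a string, which is what lets both callers reuse it: GenerateStringStub follows it with a type transition, GenerateGeneric with the ADD builtin. A plain-C++ sketch of that dispatch shape, illustrative only (the real fast paths are tail calls out of the stub):

    #include <cstdio>

    enum Kind { kSmi, kString, kOther };

    // Two fast-path exits, then fall through (return 0) when neither
    // operand is a string, leaving the caller to decide what comes next.
    const char* AddStrings(Kind left, Kind right) {
      if (left == kString)  return "tail-call StringAddStub (left checked)";
      if (right == kString) return "tail-call StringAddStub (right checked)";
      return 0;  // fall through: neither is a string
    }

    int main() {
      const char* fast = AddStrings(kSmi, kString);
      std::printf("%s\n", fast ? fast : "GenerateTypeTransition / runtime");
      return 0;
    }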
@@ -4660,8 +4638,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
-                              bool always_allocate_scope,
-                              int /* alignment_skew */) {
+                              bool always_allocate_scope) {
   // eax: result parameter for PerformGC, if any
   // ebx: pointer to C function (C callee-saved)
   // ebp: frame pointer  (restored after C call)

1
deps/v8/src/ia32/code-stubs-ia32.h

@@ -308,6 +308,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
+  void GenerateAddStrings(MacroAssembler* masm);
 
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
   void GenerateRegisterArgsPush(MacroAssembler* masm);

12
deps/v8/src/ia32/codegen-ia32.cc

@@ -209,7 +209,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
     frame_->AllocateStackSlots();
 
     // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots();
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (heap_slots > 0) {
       Comment cmnt(masm_, "[ allocate local context");
       // Allocate local context.
@@ -8230,19 +8230,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
       return;
 
     } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-      // Call the runtime to look up the context holding the named
+      // Call the runtime to delete from the context holding the named
       // variable.  Sync the virtual frame eagerly so we can push the
       // arguments directly into place.
      frame_->SyncRange(0, frame_->element_count() - 1);
       frame_->EmitPush(esi);
       frame_->EmitPush(Immediate(variable->name()));
-      Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
-      ASSERT(context.is_register());
-      frame_->EmitPush(context.reg());
-      context.Unuse();
-      frame_->EmitPush(Immediate(variable->name()));
-      Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                            CALL_FUNCTION, 2);
+      Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
       frame_->Push(&answer);
       return;
     }

112
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -106,44 +106,71 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
 }
 
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+                                      Code* check_code,
                                       Code* replacement_code) {
-  // The stack check code matches the pattern:
-  //
-  //     cmp esp, <limit>
-  //     jae ok
-  //     call <stack guard>
-  //     test eax, <loop nesting depth>
-  // ok: ...
-  //
-  // We will patch away the branch so the code is:
-  //
-  //     cmp esp, <limit>  ;; Not changed
-  //     nop
-  //     nop
-  //     call <on-stack replacment>
-  //     test eax, <loop nesting depth>
-  // ok:
-  Address call_target_address = rinfo->pc();
-  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
-         *(call_target_address - 2) == 0x07 &&  // offset
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x90;  // nop
-  *(call_target_address - 2) = 0x90;  // nop
-  rinfo->set_target_address(replacement_code->entry());
+  // Iterate the unoptimized code and patch every stack check except at
+  // the function entry.  This code assumes the function entry stack
+  // check appears first i.e., is not deferred or otherwise reordered.
+  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+  bool first = true;
+  for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+       !it.done();
+       it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->target_address() == Code::cast(check_code)->entry()) {
+      if (first) {
+        first = false;
+      } else {
+        // The stack check code matches the pattern:
+        //
+        //     cmp esp, <limit>
+        //     jae ok
+        //     call <stack guard>
+        //     test eax, <loop nesting depth>
+        // ok: ...
+        //
+        // We will patch away the branch so the code is:
+        //
+        //     cmp esp, <limit>  ;; Not changed
+        //     nop
+        //     nop
+        //     call <on-stack replacment>
+        //     test eax, <loop nesting depth>
+        // ok:
+        Address call_target_address = rinfo->pc();
+        ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
               *(call_target_address - 2) == 0x07 &&  // offset
               *(call_target_address - 1) == 0xe8);   // call
+        *(call_target_address - 3) = 0x90;  // nop
+        *(call_target_address - 2) = 0x90;  // nop
+        rinfo->set_target_address(replacement_code->entry());
+      }
+    }
+  }
 }
 
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
-  // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
-  // restore the conditional branch.
-  Address call_target_address = rinfo->pc();
-  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
-         *(call_target_address - 2) == 0x90 &&  // nop
-         *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x73;  // jae
-  *(call_target_address - 2) = 0x07;  // offset
-  rinfo->set_target_address(check_code->entry());
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+                                       Code* check_code,
+                                       Code* replacement_code) {
+  // Iterate the unoptimized code and revert all the patched stack checks.
+  for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+       !it.done();
+       it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->target_address() == replacement_code->entry()) {
+      // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
+      // restore the conditional branch.
+      Address call_target_address = rinfo->pc();
+      ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
             *(call_target_address - 2) == 0x90 &&  // nop
             *(call_target_address - 1) == 0xe8);   // call
+      *(call_target_address - 3) = 0x73;  // jae
+      *(call_target_address - 2) = 0x07;  // offset
+      rinfo->set_target_address(check_code->entry());
    }
  }
 }
@ -507,26 +534,25 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ebx, Operand(eax, Deoptimizer::input_offset())); __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers. // Fill in the input registers.
for (int i = 0; i < kNumberOfRegisters; i++) { for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kIntSize) + FrameDescription::registers_offset(); int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize)); __ pop(Operand(ebx, offset));
__ mov(Operand(ebx, offset), ecx);
} }
// Fill in the double input registers. // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset(); int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset; int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; int src_offset = i * kDoubleSize;
__ movdbl(xmm0, Operand(esp, src_offset)); __ movdbl(xmm0, Operand(esp, src_offset));
__ movdbl(Operand(ebx, dst_offset), xmm0); __ movdbl(Operand(ebx, dst_offset), xmm0);
} }
// Remove the bailout id and the general purpose registers from the stack. // Remove the bailout id and the double registers from the stack.
if (type() == EAGER) { if (type() == EAGER) {
__ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize)); __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else { } else {
__ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize)); __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
} }
// Compute a pointer to the unwinding limit in register ecx; that is // Compute a pointer to the unwinding limit in register ecx; that is
@ -591,7 +617,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Push the registers from the last output frame. // Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) { for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kIntSize) + FrameDescription::registers_offset(); int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ push(Operand(ebx, offset)); __ push(Operand(ebx, offset));
} }
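
For illustration only (an assumed example, not part of this diff): the stack checks being patched come from loop back edges in the unoptimized code, so a function like the sketch below carries one check at function entry plus one per loop. PatchStackCheckCode now walks the code object itself, skips the entry check, and retargets the loop checks at the OnStackReplacement builtin.

  // Hypothetical hot function; all names are illustrative.
  function sumSquares(n) {
    var total = 0;
    for (var i = 0; i < n; i++) {   // the back edge here compiles to
      total += i * i;               // cmp esp, <limit>; jae ok;
    }                               // call <stack guard>
    return total;
  }
  sumSquares(100000000);  // hot enough that the profiler may trigger OSR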

15  deps/v8/src/ia32/full-codegen-ia32.cc

@@ -142,7 +142,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
  bool function_in_register = true;

  // Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment cmnt(masm_, "[ Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
@@ -764,6 +764,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
  // Compile all the tests with branches to their bodies.
  for (int i = 0; i < clauses->length(); i++) {
    CaseClause* clause = clauses->at(i);
+   clause->body_target()->entry_label()->Unuse();
+
    // The default is not a test, but remember it as final fall through.
    if (clause->is_default()) {
      default_clause = clause;
@@ -3689,19 +3691,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
  if (prop != NULL) {
    VisitForStackValue(prop->obj());
    VisitForStackValue(prop->key());
+   __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
  } else if (var->is_global()) {
    __ push(GlobalObjectOperand());
    __ push(Immediate(var->name()));
+   __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
  } else {
-   // Non-global variable.  Call the runtime to look up the context
-   // where the variable was introduced.
+   // Non-global variable.  Call the runtime to delete from the
+   // context where the variable was introduced.
    __ push(context_register());
    __ push(Immediate(var->name()));
-   __ CallRuntime(Runtime::kLookupContext, 2);
-   __ push(eax);
-   __ push(Immediate(var->name()));
+   __ CallRuntime(Runtime::kDeleteContextSlot, 2);
  }
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
  context()->Plug(eax);
  }
  break;

6  deps/v8/src/ia32/ic-ia32.cc

@@ -388,7 +388,8 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  }

- void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+ void LoadIC::GenerateStringLength(MacroAssembler* masm,
+                                   bool support_wrappers) {
  // ----------- S t a t e -------------
  //  -- eax    : receiver
  //  -- ecx    : name
@@ -396,7 +397,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
  // -----------------------------------
  Label miss;

- StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss);
+ StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
+                                        support_wrappers);
  __ bind(&miss);
  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
  }

51  deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -566,37 +566,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  }

- void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                                int deoptimization_index) {
+ void LCodeGen::RecordSafepoint(
+     LPointerMap* pointers,
+     Safepoint::Kind kind,
+     int arguments,
+     int deoptimization_index) {
    const ZoneList<LOperand*>* operands = pointers->operands();
-   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-                                                     deoptimization_index);
+   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+       kind, arguments, deoptimization_index);
    for (int i = 0; i < operands->length(); i++) {
      LOperand* pointer = operands->at(i);
      if (pointer->IsStackSlot()) {
        safepoint.DefinePointerSlot(pointer->index());
+     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+       safepoint.DefinePointerRegister(ToRegister(pointer));
      }
    }
+   if (kind & Safepoint::kWithRegisters) {
+     // Register esi always contains a pointer to the context.
+     safepoint.DefinePointerRegister(esi);
+   }
+ }
+
+ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                                int deoptimization_index) {
+   RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
  }

  void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                              int arguments,
                                              int deoptimization_index) {
-   const ZoneList<LOperand*>* operands = pointers->operands();
-   Safepoint safepoint =
-       safepoints_.DefineSafepointWithRegisters(
-           masm(), arguments, deoptimization_index);
-   for (int i = 0; i < operands->length(); i++) {
-     LOperand* pointer = operands->at(i);
-     if (pointer->IsStackSlot()) {
-       safepoint.DefinePointerSlot(pointer->index());
-     } else if (pointer->IsRegister()) {
-       safepoint.DefinePointerRegister(ToRegister(pointer));
-     }
-   }
-   // Register esi always contains a pointer to the context.
-   safepoint.DefinePointerRegister(esi);
+   RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
+                   deoptimization_index);
  }
@@ -1908,7 +1911,19 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
  void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
    Register value = ToRegister(instr->InputAt(0));
-   __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
+   Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+
+   // If the cell we are storing to contains the hole it could have
+   // been deleted from the property dictionary.  In that case, we need
+   // to update the property details in the property dictionary to mark
+   // it as no longer deleted.  We deoptimize in that case.
+   if (instr->hydrogen()->check_hole_value()) {
+     __ cmp(cell_operand, Factory::the_hole_value());
+     DeoptimizeIf(equal, instr->environment());
+   }
+
+   // Store the value.
+   __ mov(cell_operand, value);
  }
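
A rough sketch of the behavior this hole check guards against (an assumed example, not from this diff): deleting a configurable global leaves the hole in its property cell, so a later store must not silently resurrect the property without fixing its details.

  // Hypothetical example; 'g' and 'store' are illustrative names.
  g = 1;                       // plain assignment: a deletable global
  function store(v) { g = v; }
  store(2);                    // optimized code may write g's cell directly
  delete g;                    // the cell now holds the hole
  store(3);                    // hole check fails -> deoptimize, so the
                               // runtime reinstates the property properly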

4  deps/v8/src/ia32/lithium-codegen-ia32.h

@@ -198,6 +198,10 @@ class LCodeGen BASE_EMBEDDED {
  void DoMathSin(LUnaryMathOperation* instr);

  // Support for recording safepoint and position information.
+ void RecordSafepoint(LPointerMap* pointers,
+                      Safepoint::Kind kind,
+                      int arguments,
+                      int deoptimization_index);
  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,

11  deps/v8/src/ia32/lithium-gap-resolver-ia32.cc

@@ -32,12 +32,11 @@ namespace v8 {
  namespace internal {

  LGapResolver::LGapResolver(LCodeGen* owner)
-     : cgen_(owner), moves_(32), spilled_register_(-1) {
-   for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
-     source_uses_[i] = 0;
-     destination_uses_[i] = 0;
-   }
- }
+     : cgen_(owner),
+       moves_(32),
+       source_uses_(),
+       destination_uses_(),
+       spilled_register_(-1) {}

  void LGapResolver::Resolve(LParallelMove* parallel_move) {

7  deps/v8/src/ia32/lithium-ia32.cc

@@ -1343,8 +1343,8 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
  if (instr->representation().IsInteger32()) {
    ASSERT(instr->left()->representation().IsInteger32());
    ASSERT(instr->right()->representation().IsInteger32());
-   LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-   LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+   LOperand* left = UseRegisterAtStart(instr->left());
+   LOperand* right = UseOrConstantAtStart(instr->right());
    LSubI* sub = new LSubI(left, right);
    LInstruction* result = DefineSameAsFirst(sub);
    if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1645,7 +1645,8 @@ LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
  LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-   return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+   LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
+   return instr->check_hole_value() ? AssignEnvironment(result) : result;
  }

8  deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -1,4 +1,4 @@
- // Copyright 2010 the V8 project authors. All rights reserved.
+ // Copyright 2011 the V8 project authors. All rights reserved.
  // Redistribution and use in source and binary forms, with or without
  // modification, are permitted provided that the following conditions are
  // met:
@@ -339,7 +339,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
  CpuFeatures::Scope scope(SSE2);
  int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
  sub(Operand(esp), Immediate(space));
- int offset = -2 * kPointerSize;
+ const int offset = -2 * kPointerSize;
  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
    XMMRegister reg = XMMRegister::from_code(i);
    movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
@@ -382,7 +382,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
  // Optionally restore all XMM registers.
  if (save_doubles) {
    CpuFeatures::Scope scope(SSE2);
-   int offset = -2 * kPointerSize;
+   const int offset = -2 * kPointerSize;
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
@@ -1288,7 +1288,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address();
  cmp(Operand::StaticVariable(scheduled_exception_address),
      Immediate(Factory::the_hole_value()));
  j(not_equal, &promote_scheduled_exception, not_taken);
  LeaveApiExitFrame();
  ret(stack_space * kPointerSize);

203  deps/v8/src/ia32/stub-cache-ia32.cc

@@ -327,28 +327,32 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
  Label check_wrapper;

  // Check if the object is a string leaving the instance type in the
  // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
+ GenerateStringCheck(masm, receiver, scratch1, miss,
+                     support_wrappers ? &check_wrapper : miss);

  // Load length from the string and convert to a smi.
  __ mov(eax, FieldOperand(receiver, String::kLengthOffset));
  __ ret(0);

- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmp(scratch1, JS_VALUE_TYPE);
- __ j(not_equal, miss, not_taken);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
+ if (support_wrappers) {
+   // Check if the object is a JSValue wrapper.
+   __ bind(&check_wrapper);
+   __ cmp(scratch1, JS_VALUE_TYPE);
+   __ j(not_equal, miss, not_taken);
+
+   // Check if the wrapped value is a string and load the length
+   // directly if it is.
+   __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
+   GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
+   __ mov(eax, FieldOperand(scratch2, String::kLengthOffset));
+   __ ret(0);
+ }
  }
@@ -451,10 +455,9 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) {
  // Generates call to API function.
- static bool GenerateFastApiCall(MacroAssembler* masm,
-                                 const CallOptimization& optimization,
-                                 int argc,
-                                 Failure** failure) {
+ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+                                         const CallOptimization& optimization,
+                                         int argc) {
  // ----------- S t a t e -------------
  //  -- esp[0] : return address
  //  -- esp[4] : object passing the type check
@@ -516,13 +519,8 @@ static bool GenerateFastApiCall(MacroAssembler* masm,
  // already generated).  Do not allow the assembler to perform a
  // garbage collection but instead return the allocation failure
  // object.
- MaybeObject* result =
-     masm->TryCallApiFunctionAndReturn(&fun, argc + kFastApiCallArguments + 1);
- if (result->IsFailure()) {
-   *failure = Failure::cast(result);
-   return false;
- }
- return true;
+ return masm->TryCallApiFunctionAndReturn(&fun,
+                                          argc + kFastApiCallArguments + 1);
  }
@@ -535,17 +533,16 @@ class CallInterceptorCompiler BASE_EMBEDDED {
        arguments_(arguments),
        name_(name) {}

- bool Compile(MacroAssembler* masm,
-              JSObject* object,
-              JSObject* holder,
-              String* name,
-              LookupResult* lookup,
-              Register receiver,
-              Register scratch1,
-              Register scratch2,
-              Register scratch3,
-              Label* miss,
-              Failure** failure) {
+ MaybeObject* Compile(MacroAssembler* masm,
+                      JSObject* object,
+                      JSObject* holder,
+                      String* name,
+                      LookupResult* lookup,
+                      Register receiver,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* miss) {
    ASSERT(holder->HasNamedInterceptor());
    ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -566,8 +563,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
                            lookup,
                            name,
                            optimization,
-                           miss,
-                           failure);
+                           miss);
    } else {
      CompileRegular(masm,
                     object,
@@ -578,23 +574,22 @@ class CallInterceptorCompiler BASE_EMBEDDED {
                     name,
                     holder,
                     miss);
-     return true;
+     return Heap::undefined_value();  // Success.
    }
  }

  private:
- bool CompileCacheable(MacroAssembler* masm,
-                       JSObject* object,
-                       Register receiver,
-                       Register scratch1,
-                       Register scratch2,
-                       Register scratch3,
-                       JSObject* interceptor_holder,
-                       LookupResult* lookup,
-                       String* name,
-                       const CallOptimization& optimization,
-                       Label* miss_label,
-                       Failure** failure) {
+ MaybeObject* CompileCacheable(MacroAssembler* masm,
+                               JSObject* object,
+                               Register receiver,
+                               Register scratch1,
+                               Register scratch2,
+                               Register scratch3,
+                               JSObject* interceptor_holder,
+                               LookupResult* lookup,
+                               String* name,
+                               const CallOptimization& optimization,
+                               Label* miss_label) {
    ASSERT(optimization.is_constant_call());
    ASSERT(!lookup->holder()->IsGlobalObject());
@@ -656,11 +651,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
    // Invoke function.
    if (can_do_fast_api_call) {
-     bool success = GenerateFastApiCall(masm, optimization,
-                                        arguments_.immediate(), failure);
-     if (!success) {
-       return false;
-     }
+     MaybeObject* result =
+         GenerateFastApiCall(masm, optimization, arguments_.immediate());
+     if (result->IsFailure()) return result;
    } else {
      __ InvokeFunction(optimization.constant_function(), arguments_,
                        JUMP_FUNCTION);
@@ -679,7 +672,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
      FreeSpaceForFastApiCall(masm, scratch1);
    }

-   return true;
+   return Heap::undefined_value();  // Success.
  }

  void CompileRegular(MacroAssembler* masm,
@@ -1057,17 +1050,16 @@ void StubCompiler::GenerateLoadField(JSObject* object,
  }

- bool StubCompiler::GenerateLoadCallback(JSObject* object,
-                                         JSObject* holder,
-                                         Register receiver,
-                                         Register name_reg,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         AccessorInfo* callback,
-                                         String* name,
-                                         Label* miss,
-                                         Failure** failure) {
+ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                 JSObject* holder,
+                                                 Register receiver,
+                                                 Register name_reg,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register scratch3,
+                                                 AccessorInfo* callback,
+                                                 String* name,
+                                                 Label* miss) {
  // Check that the receiver isn't a smi.
  __ test(receiver, Immediate(kSmiTagMask));
  __ j(zero, miss, not_taken);
@@ -1122,13 +1114,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
  // already generated).  Do not allow the assembler to perform a
  // garbage collection but instead return the allocation failure
  // object.
- MaybeObject* result = masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
- if (result->IsFailure()) {
-   *failure = Failure::cast(result);
-   return false;
- }
- return true;
+ return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
  }
@@ -2280,17 +2266,14 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
  }

  if (depth != kInvalidProtoDepth) {
-   Failure* failure;
    // Move the return address on top of the stack.
    __ mov(eax, Operand(esp, 3 * kPointerSize));
    __ mov(Operand(esp, 0 * kPointerSize), eax);

    // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
    // duplicate of return address and will be overwritten.
-   bool success = GenerateFastApiCall(masm(), optimization, argc, &failure);
-   if (!success) {
-     return failure;
-   }
+   MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+   if (result->IsFailure()) return result;
  } else {
    __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
  }
@@ -2335,21 +2318,17 @@ MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));

  CallInterceptorCompiler compiler(this, arguments(), ecx);
- Failure* failure;
- bool success = compiler.Compile(masm(),
-                                 object,
-                                 holder,
-                                 name,
-                                 &lookup,
-                                 edx,
-                                 ebx,
-                                 edi,
-                                 eax,
-                                 &miss,
-                                 &failure);
- if (!success) {
-   return failure;
- }
+ MaybeObject* result = compiler.Compile(masm(),
+                                        object,
+                                        holder,
+                                        name,
+                                        &lookup,
+                                        edx,
+                                        ebx,
+                                        edi,
+                                        eax,
+                                        &miss);
+ if (result->IsFailure()) return result;

  // Restore receiver.
  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2603,14 +2582,24 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
         Immediate(Handle<Map>(object->map())));
  __ j(not_equal, &miss, not_taken);

- // Store the value in the cell.
+ // Compute the cell operand to use.
+ Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
  if (Serializer::enabled()) {
    __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-   __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
- } else {
-   __ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
+   cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset);
  }

+ // Check that the value in the cell is not the hole.  If it is, this
+ // cell could have been deleted and reintroducing the global needs
+ // to update the property details in the property dictionary of the
+ // global object.  We bail out to the runtime system to do that.
+ __ cmp(cell_operand, Factory::the_hole_value());
+ __ j(equal, &miss);
+
+ // Store the value in the cell.
+ __ mov(cell_operand, eax);
+
  // Return the value (register eax).
  __ IncrementCounter(&Counters::named_store_global_inline, 1);
  __ ret(0);
@@ -2799,12 +2788,11 @@ MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
  // -----------------------------------
  Label miss;

- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
-                                     callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                                            edi, callback, name, &miss);
+ if (result->IsFailure()) {
    miss.Unuse();
-   return failure;
+   return result;
  }

  __ bind(&miss);
@@ -2968,12 +2956,11 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
  __ j(not_equal, &miss, not_taken);

- Failure* failure = Failure::InternalError();
- bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
-                                     callback, name, &miss, &failure);
- if (!success) {
+ MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
+                                            ecx, edi, callback, name, &miss);
+ if (result->IsFailure()) {
    miss.Unuse();
-   return failure;
+   return result;
  }

  __ bind(&miss);
@@ -3089,7 +3076,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
  __ j(not_equal, &miss, not_taken);

- GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss);
+ GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
  __ bind(&miss);
  __ DecrementCounter(&Counters::keyed_load_string_length, 1);
  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

59  deps/v8/src/ic.cc

@@ -822,6 +822,9 @@ MaybeObject* LoadIC::Load(State state,
  }

  if (FLAG_use_ic) {
+   Code* non_monomorphic_stub =
+       (state == UNINITIALIZED) ? pre_monomorphic_stub() : megamorphic_stub();
+
    // Use specialized code for getting the length of strings and
    // string wrapper objects.  The length property of string wrapper
    // objects is read-only and therefore always returns the length of
@@ -829,22 +832,27 @@ MaybeObject* LoadIC::Load(State state,
    if ((object->IsString() || object->IsStringWrapper()) &&
        name->Equals(Heap::length_symbol())) {
      HandleScope scope;
-     // Get the string if we have a string wrapper object.
-     if (object->IsJSValue()) {
-       object = Handle<Object>(Handle<JSValue>::cast(object)->value());
-     }
  #ifdef DEBUG
      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
  #endif
-     Map* map = HeapObject::cast(*object)->map();
-     if (object->IsString()) {
-       const int offset = String::kLengthOffset;
-       PatchInlinedLoad(address(), map, offset);
+     if (state == PREMONOMORPHIC) {
+       if (object->IsString()) {
+         Map* map = HeapObject::cast(*object)->map();
+         const int offset = String::kLengthOffset;
+         PatchInlinedLoad(address(), map, offset);
+         set_target(Builtins::builtin(Builtins::LoadIC_StringLength));
+       } else {
+         set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+       }
+     } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
+       set_target(Builtins::builtin(Builtins::LoadIC_StringWrapperLength));
+     } else {
+       set_target(non_monomorphic_stub);
+     }
+     // Get the string if we have a string wrapper object.
+     if (object->IsJSValue()) {
+       object = Handle<Object>(Handle<JSValue>::cast(object)->value());
      }
-     Code* target = NULL;
-     target = Builtins::builtin(Builtins::LoadIC_StringLength);
-     set_target(target);
      return Smi::FromInt(String::cast(*object)->length());
    }
@@ -853,12 +861,14 @@ MaybeObject* LoadIC::Load(State state,
  #ifdef DEBUG
      if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
  #endif
-     Map* map = HeapObject::cast(*object)->map();
-     const int offset = JSArray::kLengthOffset;
-     PatchInlinedLoad(address(), map, offset);
-
-     Code* target = Builtins::builtin(Builtins::LoadIC_ArrayLength);
-     set_target(target);
+     if (state == PREMONOMORPHIC) {
+       Map* map = HeapObject::cast(*object)->map();
+       const int offset = JSArray::kLengthOffset;
+       PatchInlinedLoad(address(), map, offset);
+       set_target(Builtins::builtin(Builtins::LoadIC_ArrayLength));
+     } else {
+       set_target(non_monomorphic_stub);
+     }
      return JSArray::cast(*object)->length();
    }
@@ -868,8 +878,11 @@ MaybeObject* LoadIC::Load(State state,
  #ifdef DEBUG
      if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
  #endif
-     Code* target = Builtins::builtin(Builtins::LoadIC_FunctionPrototype);
-     set_target(target);
+     if (state == PREMONOMORPHIC) {
+       set_target(Builtins::builtin(Builtins::LoadIC_FunctionPrototype));
+     } else {
+       set_target(non_monomorphic_stub);
+     }
      return Accessors::FunctionGetPrototype(*object, 0);
    }
  }
@@ -1092,6 +1105,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
  }

  if (FLAG_use_ic) {
+   // TODO(1073): don't ignore the current stub state.
+
    // Use specialized code for getting the length of strings.
    if (object->IsString() && name->Equals(Heap::length_symbol())) {
      Handle<String> string = Handle<String>::cast(object);
@@ -2098,8 +2113,6 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
  Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
  if (!code.is_null()) {
-   TRBinaryOpIC ic;
-   ic.patch(*code);
    if (FLAG_trace_ic) {
      PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
             TRBinaryOpIC::GetName(previous_type),
@@ -2107,6 +2120,8 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
             TRBinaryOpIC::GetName(result_type),
             Token::Name(op));
    }
+   TRBinaryOpIC ic;
+   ic.patch(*code);

    // Activate inlined smi code.
    if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
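
Roughly, the new state handling behaves as in this assumed walk-through (names illustrative, not from this diff): plain strings and String wrapper objects now get separate length stubs instead of one monomorphic stub that would keep missing on the other receiver kind.

  // Hypothetical IC state walk-through for 'length' loads.
  function len(x) { return x.length; }
  len("abc");              // early calls warm the IC up
  len("defg");             // monomorphic on strings: LoadIC_StringLength
  len(new String("hi"));   // wrapper seen -> LoadIC_StringWrapperLength
  len([1, 2, 3]);          // unrelated receiver -> megamorphic stub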

3  deps/v8/src/ic.h

@@ -284,7 +284,8 @@ class LoadIC: public IC {
  // Specialized code generator routines.
  static void GenerateArrayLength(MacroAssembler* masm);
- static void GenerateStringLength(MacroAssembler* masm);
+ static void GenerateStringLength(MacroAssembler* masm,
+                                  bool support_wrappers);
  static void GenerateFunctionPrototype(MacroAssembler* masm);

  // Clear the use of the inlined version.

10  deps/v8/src/mark-compact.cc

@@ -33,6 +33,7 @@
  #include "gdb-jit.h"
  #include "global-handles.h"
  #include "ic-inl.h"
+ #include "liveobjectlist-inl.h"
  #include "mark-compact.h"
  #include "objects-visiting.h"
  #include "stub-cache.h"
@@ -1660,6 +1661,7 @@ inline void EncodeForwardingAddressesInRange(Address start,
        free_start = current;
        is_prev_alive = false;
      }
+     LiveObjectList::ProcessNonLive(object);
    }
  }
@@ -1880,6 +1882,9 @@ static void SweepNewSpace(NewSpace* space) {
                  size,
                  false);
    } else {
+     // Process the dead object before we write a NULL into its header.
+     LiveObjectList::ProcessNonLive(object);
+
      size = object->Size();
      Memory::Address_at(current) = NULL;
    }
@@ -1899,6 +1904,7 @@ static void SweepNewSpace(NewSpace* space) {
  // Update roots.
  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+ LiveObjectList::IterateElements(&updating_visitor);

  // Update pointers in old spaces.
  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
@@ -1986,6 +1992,7 @@ static void SweepSpace(PagedSpace* space) {
        free_start = current;
        is_previous_alive = false;
      }
+     LiveObjectList::ProcessNonLive(object);
    }
    // The object is now unmarked for the call to Size() at the top of the
    // loop.
@@ -2164,6 +2171,7 @@ class MapCompact {
  void UpdateMapPointersInRoots() {
    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
+   LiveObjectList::IterateElements(&map_updating_visitor_);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
@@ -2533,6 +2541,8 @@ void MarkCompactCollector::UpdatePointers() {
  // Update the pointer to the head of the weak list of global contexts.
  updating_visitor.VisitPointer(&Heap::global_contexts_list_);

+ LiveObjectList::IterateElements(&updating_visitor);
+
  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),

36  deps/v8/src/messages.js

@@ -82,20 +82,39 @@ function FormatString(format, args) {
  var result = format;
  for (var i = 0; i < args.length; i++) {
    var str;
-   try { str = ToDetailString(args[i]); }
-   catch (e) { str = "#<error>"; }
+   try {
+     str = ToDetailString(args[i]);
+   } catch (e) {
+     str = "#<error>";
+   }
    result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
  }
  return result;
  }

+ // To check if something is a native error we need to check the
+ // concrete native error types.  It is not enough to check "obj
+ // instanceof $Error" because user code can replace
+ // NativeError.prototype.__proto__.  User code cannot replace
+ // NativeError.prototype though and therefore this is a safe test.
+ function IsNativeErrorObject(obj) {
+   return (obj instanceof $Error) ||
+       (obj instanceof $EvalError) ||
+       (obj instanceof $RangeError) ||
+       (obj instanceof $ReferenceError) ||
+       (obj instanceof $SyntaxError) ||
+       (obj instanceof $TypeError) ||
+       (obj instanceof $URIError);
+ }

  // When formatting internally created error messages, do not
  // invoke overwritten error toString methods but explicitly use
  // the error to string method.  This is to avoid leaking error
  // objects between script tags in a browser setting.
  function ToStringCheckErrorObject(obj) {
- if (obj instanceof $Error) {
+ if (IsNativeErrorObject(obj)) {
    return %_CallFunction(obj, errorToString);
  } else {
    return ToString(obj);
@@ -108,7 +127,9 @@ function ToDetailString(obj) {
    var constructor = obj.constructor;
    if (!constructor) return ToStringCheckErrorObject(obj);
    var constructorName = constructor.name;
-   if (!constructorName) return ToStringCheckErrorObject(obj);
+   if (!constructorName || !IS_STRING(constructorName)) {
+     return ToStringCheckErrorObject(obj);
+   }
    return "#<" + GetInstanceName(constructorName) + ">";
  } else {
    return ToStringCheckErrorObject(obj);
@@ -216,6 +237,13 @@ function FormatMessage(message) {
      strict_param_dupe: "Strict mode function may not have duplicate parameter names",
      strict_var_name: "Variable name may not be eval or arguments in strict mode",
      strict_function_name: "Function name may not be eval or arguments in strict mode",
+     strict_octal_literal: "Octal literals are not allowed in strict mode.",
+     strict_duplicate_property: "Duplicate data property in object literal not allowed in strict mode",
+     accessor_data_property: "Object literal may not have data and accessor property with the same name",
+     accessor_get_set: "Object literal may not have multiple get/set accessors with the same name",
+     strict_lhs_eval_assignment: "Assignment to eval or arguments is not allowed in strict mode",
+     strict_lhs_postfix: "Postfix increment/decrement may not have eval or arguments operand in strict mode",
+     strict_lhs_prefix: "Prefix increment/decrement may not have eval or arguments operand in strict mode",
    };
  }
  var format = kMessages[message.type];
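
A short sketch of why the chain above checks each concrete error type (an assumed example, not from this diff):

  // User code can detach Error.prototype from a native error's chain.
  var e = new TypeError("boom");
  e instanceof Error;          // true
  TypeError.prototype.__proto__ = null;
  e instanceof Error;          // false - a single $Error test now fails
  e instanceof TypeError;      // true - NativeError.prototype itself
                               // cannot be replaced, so the per-type
                               // checks stay reliable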

3  deps/v8/src/objects-inl.h

@@ -57,8 +57,7 @@ Smi* PropertyDetails::AsSmi() {
  PropertyDetails PropertyDetails::AsDeleted() {
-   PropertyDetails d(DONT_ENUM, NORMAL);
-   Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
+   Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
    return PropertyDetails(smi);
  }

215  deps/v8/src/parser.cc

@@ -664,7 +664,11 @@ FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
  TemporaryScope temp_scope(&this->temp_scope_);
  ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
  bool ok = true;
+ int beg_loc = scanner().location().beg_pos;
  ParseSourceElements(body, Token::EOS, &ok);
+ if (ok && temp_scope_->StrictMode()) {
+   CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
+ }
  if (ok) {
    result = new FunctionLiteral(
        no_name,
@@ -2288,6 +2292,11 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
    expression = NewThrowReferenceError(type);
  }

+ if (temp_scope_->StrictMode()) {
+   // Assignment to eval or arguments is disallowed in strict mode.
+   CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
+ }
+
  Token::Value op = Next();  // Get assignment operator.
  int pos = scanner().location().beg_pos;
  Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
@@ -2514,6 +2523,12 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
    Handle<String> type = Factory::invalid_lhs_in_prefix_op_symbol();
    expression = NewThrowReferenceError(type);
  }

+ if (temp_scope_->StrictMode()) {
+   // Prefix expression operand in strict mode may not be eval or arguments.
+   CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+ }
+
  int position = scanner().location().beg_pos;
  IncrementOperation* increment = new IncrementOperation(op, expression);
  return new CountOperation(true /* prefix */, increment, position);
@@ -2539,6 +2554,12 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
    Handle<String> type = Factory::invalid_lhs_in_postfix_op_symbol();
    expression = NewThrowReferenceError(type);
  }

+ if (temp_scope_->StrictMode()) {
+   // Postfix expression operand in strict mode may not be eval or arguments.
+   CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
+ }
+
  Token::Value next = Next();
  int position = scanner().location().beg_pos;
  IncrementOperation* increment = new IncrementOperation(next, expression);
@@ -3012,6 +3033,126 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) {
  return Factory::undefined_value();
  }

+ // Defined in ast.cc
+ bool IsEqualString(void* first, void* second);
+ bool IsEqualSmi(void* first, void* second);
+
+ // Validation per 11.1.5 Object Initialiser
+ class ObjectLiteralPropertyChecker {
+  public:
+   ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
+       props(&IsEqualString),
+       elems(&IsEqualSmi),
+       parser_(parser),
+       strict_(strict) {
+   }
+
+   void CheckProperty(
+       ObjectLiteral::Property* property,
+       Scanner::Location loc,
+       bool* ok);
+
+  private:
+   enum PropertyKind {
+     kGetAccessor = 0x01,
+     kSetAccessor = 0x02,
+     kAccessor = kGetAccessor | kSetAccessor,
+     kData = 0x04
+   };
+
+   static intptr_t GetPropertyKind(ObjectLiteral::Property* property) {
+     switch (property->kind()) {
+       case ObjectLiteral::Property::GETTER:
+         return kGetAccessor;
+       case ObjectLiteral::Property::SETTER:
+         return kSetAccessor;
+       default:
+         return kData;
+     }
+   }
+
+   HashMap props;
+   HashMap elems;
+   Parser* parser_;
+   bool strict_;
+ };
+
+ void ObjectLiteralPropertyChecker::CheckProperty(
+     ObjectLiteral::Property* property,
+     Scanner::Location loc,
+     bool* ok) {
+   ASSERT(property != NULL);
+
+   Literal* lit = property->key();
+   Handle<Object> handle = lit->handle();
+
+   uint32_t hash;
+   HashMap* map;
+   void* key;
+   Smi* smi_key_location;
+
+   if (handle->IsSymbol()) {
+     Handle<String> name(String::cast(*handle));
+     if (name->AsArrayIndex(&hash)) {
+       smi_key_location = Smi::FromInt(hash);
+       key = &smi_key_location;
+       map = &elems;
+     } else {
+       key = handle.location();
+       hash = name->Hash();
+       map = &props;
+     }
+   } else if (handle->ToArrayIndex(&hash)) {
+     key = handle.location();
+     map = &elems;
+   } else {
+     ASSERT(handle->IsNumber());
+     double num = handle->Number();
+     char arr[100];
+     Vector<char> buffer(arr, ARRAY_SIZE(arr));
+     const char* str = DoubleToCString(num, buffer);
+     Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
+     key = name.location();
+     hash = name->Hash();
+     map = &props;
+   }
+
+   // Lookup property previously defined, if any.
+   HashMap::Entry* entry = map->Lookup(key, hash, true);
+   intptr_t prev = reinterpret_cast<intptr_t>(entry->value);
+   intptr_t curr = GetPropertyKind(property);
+
+   // Duplicate data properties are illegal in strict mode.
+   if (strict_ && (curr & prev & kData) != 0) {
+     parser_->ReportMessageAt(loc, "strict_duplicate_property",
+                              Vector<const char*>::empty());
+     *ok = false;
+     return;
+   }
+   // Data property conflicting with an accessor.
+   if (((curr & kData) && (prev & kAccessor)) ||
+       ((prev & kData) && (curr & kAccessor))) {
+     parser_->ReportMessageAt(loc, "accessor_data_property",
+                              Vector<const char*>::empty());
+     *ok = false;
+     return;
+   }
+   // Two accessors of the same type conflicting.
+   if ((curr & prev & kAccessor) != 0) {
+     parser_->ReportMessageAt(loc, "accessor_get_set",
+                              Vector<const char*>::empty());
+     *ok = false;
+     return;
+   }
+
+   // Update map.
+   entry->value = reinterpret_cast<void*>(prev | curr);
+   *ok = true;
+ }
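
Illustrative inputs for the checker above (assumed examples, not from this diff); the first is rejected only in strict mode, the next two violate ES5 11.1.5 in any mode:

  ({ x: 1, x: 2 });                   // strict_duplicate_property
  ({ x: 1, get x() { return 2; } });  // accessor_data_property
  ({ get x() {}, get x() {} });       // accessor_get_set
  ({ get x() {}, set x(v) {} });      // valid: one getter, one setter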
  void Parser::BuildObjectLiteralConstantProperties(
      ZoneList<ObjectLiteral::Property*>* properties,
@@ -3117,12 +3258,20 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
      new ZoneList<ObjectLiteral::Property*>(4);
  int number_of_boilerplate_properties = 0;

+ ObjectLiteralPropertyChecker checker(this, temp_scope_->StrictMode());
+
  Expect(Token::LBRACE, CHECK_OK);
+ Scanner::Location loc = scanner().location();
+
  while (peek() != Token::RBRACE) {
    if (fni_ != NULL) fni_->Enter();

    Literal* key = NULL;
    Token::Value next = peek();
+
+   // Location of the property name token
+   loc = scanner().peek_location();
+
    switch (next) {
      case Token::IDENTIFIER: {
        bool is_getter = false;
@@ -3132,11 +3281,15 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
        if (fni_ != NULL) fni_->PushLiteralName(id);

        if ((is_getter || is_setter) && peek() != Token::COLON) {
+         // Update loc to point to the identifier
+         loc = scanner().peek_location();
          ObjectLiteral::Property* property =
              ParseObjectLiteralGetSet(is_getter, CHECK_OK);
          if (IsBoilerplateProperty(property)) {
            number_of_boilerplate_properties++;
          }
+         // Validate the property.
+         checker.CheckProperty(property, loc, CHECK_OK);
          properties->Add(property);
          if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK);
@@ -3193,6 +3346,8 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
      // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
      if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++;

+     // Validate the property
+     checker.CheckProperty(property, loc, CHECK_OK);
+
      properties->Add(property);

      // TODO(1240767): Consider allowing trailing comma.
@@ -3204,6 +3359,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
    }
  }
  Expect(Token::RBRACE, CHECK_OK);
+
  // Computation of literal_index must happen before pre parse bailout.
  int literal_index = temp_scope_->NextMaterializedLiteralIndex();
@@ -3296,10 +3452,21 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
  //  '(' (Identifier)*[','] ')'
  Expect(Token::LPAREN, CHECK_OK);
  int start_pos = scanner().location().beg_pos;

+ Scanner::Location name_loc = Scanner::NoLocation();
+ Scanner::Location dupe_loc = Scanner::NoLocation();
+
  bool done = (peek() == Token::RPAREN);
  while (!done) {
    Handle<String> param_name = ParseIdentifier(CHECK_OK);
+
+   // Store locations for possible future error reports.
+   if (!name_loc.IsValid() && IsEvalOrArguments(param_name)) {
+     name_loc = scanner().location();
+   }
+   if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
+     dupe_loc = scanner().location();
+   }
+
    Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
    top_scope_->AddParameter(parameter);
    num_parameters++;
@@ -3377,14 +3544,26 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
  if (temp_scope_->StrictMode()) {
    if (IsEvalOrArguments(name)) {
      int position = function_token_position != RelocInfo::kNoPosition
          ? function_token_position
          : (start_pos > 0 ? start_pos - 1 : start_pos);
      ReportMessageAt(Scanner::Location(position, start_pos),
                      "strict_function_name", Vector<const char*>::empty());
      *ok = false;
      return NULL;
    }
-   // TODO(mmaly): Check for octal escape sequence here.
+   if (name_loc.IsValid()) {
+     ReportMessageAt(name_loc, "strict_param_name",
+                     Vector<const char*>::empty());
+     *ok = false;
+     return NULL;
+   }
+   if (dupe_loc.IsValid()) {
+     ReportMessageAt(dupe_loc, "strict_param_dupe",
+                     Vector<const char*>::empty());
+     *ok = false;
+     return NULL;
+   }
+   CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
  }

  FunctionLiteral* function_literal =
@@ -3531,6 +3710,36 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
  }

+ // Checks LHS expression for assignment and prefix/postfix increment/decrement
+ // in strict mode.
+ void Parser::CheckStrictModeLValue(Expression* expression,
+                                    const char* error,
+                                    bool* ok) {
+   ASSERT(temp_scope_->StrictMode());
+   VariableProxy* lhs = expression != NULL
+       ? expression->AsVariableProxy()
+       : NULL;
+   if (lhs != NULL && !lhs->is_this() && IsEvalOrArguments(lhs->name())) {
+     ReportMessage(error, Vector<const char*>::empty());
+     *ok = false;
+   }
+ }
+
+ // Checks whether octal literal last seen is between beg_pos and end_pos.
+ // If so, reports an error.
+ void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+   int octal = scanner().octal_position();
+   if (beg_pos <= octal && octal <= end_pos) {
+     ReportMessageAt(Scanner::Location(octal, octal + 1),
+                     "strict_octal_literal",
+                     Vector<const char*>::empty());
+     scanner().clear_octal_position();
+     *ok = false;
+   }
+ }

  // This function reads an identifier and determines whether or not it
  // is 'get' or 'set'.  The reason for not using ParseIdentifier and
  // checking on the output is that this involves heap allocation which
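
Hedged examples of source the new strict-mode checks reject (assumed snippets; each commented line would be a SyntaxError if uncommented inside strict code):

  "use strict";
  // eval = 1;                  // assignment to eval or arguments
  // arguments++;               // postfix on eval or arguments
  // ++eval;                    // prefix on eval or arguments
  // var n = 010;               // octal literal
  // (function f(a, a) {});     // duplicate parameter names
  // (function g(eval) {});     // parameter named eval or arguments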

8  deps/v8/src/parser.h

@@ -613,6 +613,14 @@ class Parser {
                                bool* is_set,
                                bool* ok);

+ // Strict mode validation of LValue expressions
+ void CheckStrictModeLValue(Expression* expression,
+                            const char* error,
+                            bool* ok);
+
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
  // Parser support
  VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
                         FunctionLiteral* fun,

2  deps/v8/src/platform-freebsd.cc

@@ -430,7 +430,7 @@ Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
  Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
-   set_names(name);
+   set_name(name);
  }

19  deps/v8/src/runtime-profiler.cc

@@ -193,22 +193,9 @@ static void AttemptOnStackReplacement(JSFunction* function) {
  if (maybe_check_code->ToObject(&check_code)) {
    Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
    Code* unoptimized_code = shared->code();
-   // Iterate the unoptimized code and patch every stack check except at
-   // the function entry.  This code assumes the function entry stack
-   // check appears first i.e., is not deferred or otherwise reordered.
-   bool first = true;
-   for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
-        !it.done();
-        it.next()) {
-     RelocInfo* rinfo = it.rinfo();
-     if (rinfo->target_address() == Code::cast(check_code)->entry()) {
-       if (first) {
-         first = false;
-       } else {
-         Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
-       }
-     }
-   }
+   Deoptimizer::PatchStackCheckCode(unoptimized_code,
+                                    Code::cast(check_code),
+                                    replacement_code);
  }
  }

49
deps/v8/src/runtime.cc

@@ -6944,15 +6944,9 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
     Handle<Code> check_code = check_stub.GetCode();
     Handle<Code> replacement_code(
         Builtins::builtin(Builtins::OnStackReplacement));
-    // Iterate the unoptimized code and revert all the patched stack checks.
-    for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
-         !it.done();
-         it.next()) {
-      RelocInfo* rinfo = it.rinfo();
-      if (rinfo->target_address() == replacement_code->entry()) {
-        Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
-      }
-    }
+    Deoptimizer::RevertStackCheckCode(*unoptimized,
+                                      *check_code,
+                                      *replacement_code);

     // Allow OSR only at nesting level zero again.
     unoptimized->set_allow_osr_at_loop_nesting_level(0);

@@ -7049,7 +7043,7 @@ static MaybeObject* Runtime_PushCatchContext(Arguments args) {
 }

-static MaybeObject* Runtime_LookupContext(Arguments args) {
+static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 2);

@@ -7059,16 +7053,31 @@ static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes);
+  Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);

-  if (index < 0 && !holder.is_null()) {
-    ASSERT(holder->IsJSObject());
-    return *holder;
-  }
+  // If the slot was not found the result is true.
+  if (holder.is_null()) {
+    return Heap::true_value();
+  }

-  // No intermediate context found. Use global object by default.
-  return Top::context()->global();
+  // If the slot was found in a context, it should be DONT_DELETE.
+  if (holder->IsContext()) {
+    return Heap::false_value();
+  }
+
+  // The slot was found in a JSObject, either a context extension object,
+  // the global object, or an arguments object.  Try to delete it
+  // (respecting DONT_DELETE).  For consistency with V8's usual behavior,
+  // which allows deleting all parameters in functions that mention
+  // 'arguments', we do this even for the case of slots found on an
+  // arguments object.  The slot was found on an arguments object if the
+  // index is non-negative.
+  Handle<JSObject> object = Handle<JSObject>::cast(holder);
+  if (index >= 0) {
+    return object->DeleteElement(index, JSObject::NORMAL_DELETION);
+  } else {
+    return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
+  }
 }
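Note: the three return paths above mirror JavaScript's delete semantics for lookup slots: unresolvable names delete to true, context-allocated variables are DONT_DELETE, and slots held on objects (context extensions, the global object, arguments objects) go through ordinary property deletion. A condensed, runnable model of just that decision, with hypothetical names:

#include <cstdio>

// Hypothetical condensed model of Runtime_DeleteContextSlot's result logic.
enum HolderKind { kNotFound, kContext, kJSObject };

// 'delete' on a lookup slot: unresolvable names delete to true, context
// slots are DONT_DELETE (false), and object-held slots take the regular
// property-deletion path (assumed to succeed here).
bool DeleteContextSlotResult(HolderKind holder) {
  if (holder == kNotFound) return true;   // e.g. delete neverDeclared
  if (holder == kContext) return false;   // e.g. delete x, where x is a var
  return true;                            // e.g. eval("var y"); delete y
}

int main() {
  std::printf("%d %d %d\n",
              DeleteContextSlotResult(kNotFound),
              DeleteContextSlotResult(kContext),
              DeleteContextSlotResult(kJSObject));  // prints: 1 0 1
}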
@@ -7141,8 +7150,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes);
+  Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);

   // If the index is non-negative, the slot has been found in a local
   // variable or a parameter.  Read it from the context object or the

@@ -7209,8 +7217,7 @@ static MaybeObject* Runtime_StoreContextSlot(Arguments args) {
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = FOLLOW_CHAINS;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes);
+  Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);

   if (index >= 0) {
     if (holder->IsContext()) {
2
deps/v8/src/runtime.h

@@ -284,7 +284,7 @@ namespace internal {
   F(NewContext, 1, 1) \
   F(PushContext, 1, 1) \
   F(PushCatchContext, 1, 1) \
-  F(LookupContext, 2, 1) \
+  F(DeleteContextSlot, 2, 1) \
   F(LoadContextSlot, 2, 2) \
   F(LoadContextSlotNoReferenceError, 2, 2) \
   F(StoreContextSlot, 3, 1) \

43
deps/v8/src/safepoint-table.cc

@@ -117,24 +117,9 @@ void Safepoint::DefinePointerRegister(Register reg) {
 }

-Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
-                                                 int deoptimization_index) {
-  ASSERT(deoptimization_index != -1);
-  DeoptimizationInfo pc_and_deoptimization_index;
-  pc_and_deoptimization_index.pc = assembler->pc_offset();
-  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
-  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
-  pc_and_deoptimization_index.arguments = 0;
-  pc_and_deoptimization_index.has_doubles = false;
-  deoptimization_info_.Add(pc_and_deoptimization_index);
-  indexes_.Add(new ZoneList<int>(8));
-  registers_.Add(NULL);
-  return Safepoint(indexes_.last(), registers_.last());
-}
-
-Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
-    Assembler* assembler, int arguments, int deoptimization_index) {
+Safepoint SafepointTableBuilder::DefineSafepoint(
+    Assembler* assembler, Safepoint::Kind kind, int arguments,
+    int deoptimization_index) {
   ASSERT(deoptimization_index != -1);
   ASSERT(arguments >= 0);
   DeoptimizationInfo pc_and_deoptimization_index;

@@ -142,30 +127,16 @@ Safepoint SafepointTableBuilder::DefineSafepoint(
   pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
   pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
   pc_and_deoptimization_index.arguments = arguments;
-  pc_and_deoptimization_index.has_doubles = false;
+  pc_and_deoptimization_index.has_doubles = (kind & Safepoint::kWithDoubles);
   deoptimization_info_.Add(pc_and_deoptimization_index);
   indexes_.Add(new ZoneList<int>(8));
-  registers_.Add(new ZoneList<int>(4));
+  registers_.Add((kind & Safepoint::kWithRegisters)
+      ? new ZoneList<int>(4)
+      : NULL);
   return Safepoint(indexes_.last(), registers_.last());
 }

-Safepoint SafepointTableBuilder::DefineSafepointWithRegistersAndDoubles(
-    Assembler* assembler, int arguments, int deoptimization_index) {
-  ASSERT(deoptimization_index != -1);
-  ASSERT(arguments >= 0);
-  DeoptimizationInfo pc_and_deoptimization_index;
-  pc_and_deoptimization_index.pc = assembler->pc_offset();
-  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
-  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
-  pc_and_deoptimization_index.arguments = arguments;
-  pc_and_deoptimization_index.has_doubles = true;
-  deoptimization_info_.Add(pc_and_deoptimization_index);
-  indexes_.Add(new ZoneList<int>(8));
-  registers_.Add(new ZoneList<int>(4));
-  return Safepoint(indexes_.last(), registers_.last());
-}
-
 unsigned SafepointTableBuilder::GetCodeOffset() const {
   ASSERT(emitted_);
   return offset_;

25
deps/v8/src/safepoint-table.h

@@ -180,6 +180,13 @@ class SafepointTable BASE_EMBEDDED {

 class Safepoint BASE_EMBEDDED {
  public:
+  typedef enum {
+    kSimple = 0,
+    kWithRegisters = 1 << 0,
+    kWithDoubles = 1 << 1,
+    kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
+  } Kind;
+
   static const int kNoDeoptimizationIndex =
       (1 << (SafepointEntry::kDeoptIndexBits)) - 1;

@@ -210,23 +217,7 @@ class SafepointTableBuilder BASE_EMBEDDED {
   // Define a new safepoint for the current position in the body.
   Safepoint DefineSafepoint(
       Assembler* assembler,
-      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
-
-  // Define a new safepoint with registers on the stack for the
-  // current position in the body and take the number of arguments on
-  // top of the registers into account.
-  Safepoint DefineSafepointWithRegisters(
-      Assembler* assembler,
-      int arguments,
-      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
-
-  // Define a new safepoint with all double registers and the normal
-  // registers on the stack for the current position in the body and
-  // take the number of arguments on top of the registers into account.
-  // TODO(1043) Rewrite the three SafepointTableBuilder::DefineSafepoint
-  // methods to one method that uses template arguments.
-  Safepoint DefineSafepointWithRegistersAndDoubles(
-      Assembler* assembler,
+      Safepoint::Kind kind,
       int arguments,
       int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
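Note: call sites migrate mechanically, since each of the three removed methods corresponds to one Kind value. A standalone model of the bitmask behavior (paraphrased field names, not the real builder):

#include <cassert>

// Standalone model of the new Safepoint::Kind bitmask (mirrors the enum
// added above; the builder logic is paraphrased, not V8's real code).
enum Kind {
  kSimple = 0,
  kWithRegisters = 1 << 0,
  kWithDoubles = 1 << 1,
  kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
};

struct SafepointInfo {
  bool has_doubles;
  bool tracks_registers;
};

SafepointInfo Define(Kind kind) {
  SafepointInfo info;
  info.has_doubles = (kind & kWithDoubles) != 0;
  info.tracks_registers = (kind & kWithRegisters) != 0;
  return info;
}

int main() {
  // The three removed methods correspond to these kinds:
  assert(!Define(kSimple).tracks_registers);             // DefineSafepoint
  assert(Define(kWithRegisters).tracks_registers);       // ...WithRegisters
  assert(Define(kWithRegistersAndDoubles).has_doubles);  // ...AndDoubles
  return 0;
}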

10
deps/v8/src/scanner-base.cc

@@ -64,7 +64,8 @@ bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
 // ----------------------------------------------------------------------------
 // Scanner

-Scanner::Scanner() { }
+Scanner::Scanner()
+    : octal_pos_(kNoOctalLocation) { }

@@ -98,6 +99,7 @@ uc32 Scanner::ScanHexEscape(uc32 c, int length) {
 // Octal escapes of the forms '\0xx' and '\xxx' are not a part of
 // ECMA-262.  Other JS VMs support them.
 uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+  octal_pos_ = source_pos() - 1;  // Already advanced.
   uc32 x = c - '0';
   for (int i = 0; i < length; i++) {
     int d = c0_ - '0';

@@ -601,7 +603,11 @@ Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
         kind = DECIMAL;
         break;
       }
-      if (c0_ < '0' || '7' < c0_) break;
+      if (c0_ < '0' || '7' < c0_) {
+        // Octal literal finished.
+        octal_pos_ = next_.location.beg_pos;
+        break;
+      }
       AddLiteralCharAdvance();
     }
   }

18
deps/v8/src/scanner-base.h

@@ -247,6 +247,9 @@ class LiteralBuffer {
 // Generic functionality used by both JSON and JavaScript scanners.
 class Scanner {
  public:
+  // -1 is outside of the range of any real source code.
+  static const int kNoOctalLocation = -1;
+
   typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;

   class LiteralScope {

@@ -271,15 +274,28 @@ class Scanner {
   struct Location {
     Location(int b, int e) : beg_pos(b), end_pos(e) { }
     Location() : beg_pos(0), end_pos(0) { }
+
+    bool IsValid() const {
+      return beg_pos >= 0 && end_pos >= beg_pos;
+    }
+
     int beg_pos;
     int end_pos;
   };

+  static Location NoLocation() {
+    return Location(-1, -1);
+  }
+
   // Returns the location information for the current token
   // (the token returned by Next()).
   Location location() const { return current_.location; }
   Location peek_location() const { return next_.location; }

+  // Returns the location of the last seen octal literal.
+  int octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = -1; }
+
   // Returns the literal string, if any, for the current token (the
   // token returned by Next()).  The string is 0-terminated and in
   // UTF-8 format; they may contain 0-characters.  Literal strings are

@@ -410,6 +426,8 @@ class Scanner {
   // Input stream.  Must be initialized to an UC16CharacterStream.
   UC16CharacterStream* source_;

+  // Start position of the octal literal last scanned.
+  int octal_pos_;
+
   // One Unicode character look-ahead; c0_ < 0 at the end of the input.
   uc32 c0_;

5
deps/v8/src/scopes.cc

@@ -726,7 +726,6 @@ void Scope::ResolveVariable(Scope* global_scope,
       // Note that we must do a lookup anyway, because if we find one,
       // we must mark that variable as potentially accessed from this
      // inner scope (the property may not be in the 'with' object).
-      if (var != NULL) var->set_is_used(true);
       var = NonLocal(proxy->name(), Variable::DYNAMIC);

     } else {

@@ -834,8 +833,8 @@ bool Scope::MustAllocate(Variable* var) {
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
       (var->is_accessed_from_inner_scope() ||
-       scope_calls_eval_ ||
-       inner_scope_calls_eval_)) {
+       scope_calls_eval_ || inner_scope_calls_eval_ ||
+       scope_contains_with_)) {
     var->set_is_used(true);
   }

   // Global variables do not need to be allocated.
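Note: the new scope_contains_with_ term forces allocation of named variables in scopes that contain a with statement, since references inside with cannot be resolved statically (e.g. function f() { var x = 1; with (o) { return x; } }). A standalone model of the amended predicate, with paraphrased field names:

#include <cstdio>

// Standalone model of the MustAllocate condition changed above
// (field names paraphrased; illustration only).
struct ScopeBits {
  bool accessed_from_inner_scope;
  bool calls_eval;
  bool inner_calls_eval;
  bool contains_with;
};

bool MustMarkUsed(const ScopeBits& s) {
  return s.accessed_from_inner_scope || s.calls_eval ||
         s.inner_calls_eval || s.contains_with;
}

int main() {
  ScopeBits with_only = {false, false, false, true};
  std::printf("%d\n", MustMarkUsed(with_only));  // 1: 'with' alone suffices
}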

11
deps/v8/src/scopes.h

@@ -288,6 +288,17 @@ class Scope: public ZoneObject {
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope);

+  // ---------------------------------------------------------------------------
+  // Strict mode support.
+  bool IsDeclared(Handle<String> name) {
+    // During formal parameter list parsing the scope only contains
+    // two variables inserted at initialization: "this" and "arguments".
+    // "this" is an invalid parameter name, and "arguments" is an invalid
+    // parameter name in strict mode.  Therefore looking up with the map,
+    // which includes "this" and "arguments" in addition to all formal
+    // parameters, is safe.
+    return variables_.Lookup(name) != NULL;
+  }
+
   // ---------------------------------------------------------------------------
   // Debugging.

4
deps/v8/src/serialize.cc

@@ -335,7 +335,7 @@ void ExternalReferenceTable::PopulateTable() {
   Add(ExternalReference::delete_handle_scope_extensions().address(),
       RUNTIME_ENTRY,
-      3,
+      4,
       "HandleScope::DeleteExtensions");

   // Miscellaneous

@@ -504,7 +504,7 @@ void ExternalReferenceTable::PopulateTable() {
       "power_double_int_function");
   Add(ExternalReference::arguments_marker_location().address(),
       UNCLASSIFIED,
-      40,
+      41,
       "Factory::arguments_marker().location()");
 }

3
deps/v8/src/spaces.cc

@@ -27,6 +27,7 @@
 #include "v8.h"

+#include "liveobjectlist-inl.h"
 #include "macro-assembler.h"
 #include "mark-compact.h"
 #include "platform.h"

@@ -3125,6 +3126,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       // Free the chunk.
       MarkCompactCollector::ReportDeleteIfNeeded(object);
+      LiveObjectList::ProcessNonLive(object);
+
       size_ -= static_cast<int>(chunk_size);
       objects_size_ -= object->Size();
       page_count_--;

24
deps/v8/src/stub-cache.h

@@ -427,7 +427,8 @@ class StubCompiler BASE_EMBEDDED {
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
-                                       Label* miss_label);
+                                       Label* miss_label,
+                                       bool support_wrappers);

   static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                             Register receiver,

@@ -501,17 +502,16 @@ class StubCompiler BASE_EMBEDDED {
                         String* name,
                         Label* miss);

-  bool GenerateLoadCallback(JSObject* object,
-                            JSObject* holder,
-                            Register receiver,
-                            Register name_reg,
-                            Register scratch1,
-                            Register scratch2,
-                            Register scratch3,
-                            AccessorInfo* callback,
-                            String* name,
-                            Label* miss,
-                            Failure** failure);
+  MaybeObject* GenerateLoadCallback(JSObject* object,
+                                    JSObject* holder,
+                                    Register receiver,
+                                    Register name_reg,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    Register scratch3,
+                                    AccessorInfo* callback,
+                                    String* name,
+                                    Label* miss);

   void GenerateLoadConstant(JSObject* object,
                             JSObject* holder,

14
deps/v8/src/top.cc

@@ -72,7 +72,7 @@ void ThreadLocalTop::Initialize() {
   handler_ = 0;
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  simulator_ = assembler::arm::Simulator::current();
+  simulator_ = Simulator::current();
 #elif V8_TARGET_ARCH_MIPS
   simulator_ = assembler::mips::Simulator::current();
 #endif

@@ -806,7 +806,7 @@ void Top::ComputeLocation(MessageLocation* target) {
 }

-bool Top::ShouldReturnException(bool* is_caught_externally,
+bool Top::ShouldReportException(bool* is_caught_externally,
                                 bool catchable_by_javascript) {
   // Find the top-most try-catch handler.
   StackHandler* handler =

@@ -847,15 +847,15 @@ void Top::DoThrow(MaybeObject* exception,
   Handle<Object> exception_handle(exception_object);

   // Determine reporting and whether the exception is caught externally.
-  bool is_caught_externally = false;
   bool is_out_of_memory = exception == Failure::OutOfMemoryException();
   bool is_termination_exception = exception == Heap::termination_exception();
   bool catchable_by_javascript = !is_termination_exception && !is_out_of_memory;
   // Only real objects can be caught by JS.
   ASSERT(!catchable_by_javascript || is_object);
-  bool should_return_exception =
-      ShouldReturnException(&is_caught_externally, catchable_by_javascript);
-  bool report_exception = catchable_by_javascript && should_return_exception;
+  bool is_caught_externally = false;
+  bool should_report_exception =
+      ShouldReportException(&is_caught_externally, catchable_by_javascript);
+  bool report_exception = catchable_by_javascript && should_report_exception;

 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger of exception.

@@ -1095,7 +1095,7 @@ char* Top::RestoreThread(char* from) {
   // thread_local_ is restored on a separate OS thread.
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  thread_local_.simulator_ = assembler::arm::Simulator::current();
+  thread_local_.simulator_ = Simulator::current();
 #elif V8_TARGET_ARCH_MIPS
   thread_local_.simulator_ = assembler::mips::Simulator::current();
 #endif

6
deps/v8/src/top.h

@@ -109,7 +109,7 @@ class ThreadLocalTop BASE_EMBEDDED {
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
-  assembler::arm::Simulator* simulator_;
+  Simulator* simulator_;
 #elif V8_TARGET_ARCH_MIPS
   assembler::mips::Simulator* simulator_;
 #endif

@@ -386,7 +386,9 @@ class Top {
   static void DoThrow(MaybeObject* exception,
                       MessageLocation* location,
                       const char* message);
-  static bool ShouldReturnException(bool* is_caught_externally,
+  // Checks if the exception should be reported and finds out if it's
+  // caught externally.
+  static bool ShouldReportException(bool* is_caught_externally,
                                     bool catchable_by_javascript);

   // Attempts to compute the current source location, storing the

4
deps/v8/src/type-info.cc

@@ -171,7 +171,7 @@ bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
 }

-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
+TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
   Handle<Object> object = GetElement(map_, expr->position());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;

@@ -198,7 +198,7 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
 }

-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
   Handle<Object> object = GetElement(map_, expr->position());
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;

10
deps/v8/src/type-info.h

@@ -236,12 +236,6 @@ class CaseClause;
 class TypeFeedbackOracle BASE_EMBEDDED {
  public:
-  enum Side {
-    LEFT,
-    RIGHT,
-    RESULT
-  };
-
   TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);

   bool LoadIsMonomorphic(Property* expr);

@@ -261,8 +255,8 @@ class TypeFeedbackOracle BASE_EMBEDDED {
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);

   // Get type information for arithmetic operations and compares.
-  TypeInfo BinaryType(BinaryOperation* expr, Side side);
-  TypeInfo CompareType(CompareOperation* expr, Side side);
+  TypeInfo BinaryType(BinaryOperation* expr);
+  TypeInfo CompareType(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);

  private:

2
deps/v8/src/v8.cc

@@ -79,7 +79,7 @@ bool V8::Initialize(Deserializer* des) {
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM)
-  ::assembler::arm::Simulator::Initialize();
+  Simulator::Initialize();
 #elif defined(V8_TARGET_ARCH_MIPS)
   ::assembler::mips::Simulator::Initialize();
 #endif

6
deps/v8/src/variables.cc

@@ -112,12 +112,12 @@ Variable::Variable(Scope* scope,
   : scope_(scope),
     name_(name),
     mode_(mode),
-    is_valid_LHS_(is_valid_LHS),
     kind_(kind),
     local_if_not_shadowed_(NULL),
+    rewrite_(NULL),
+    is_valid_LHS_(is_valid_LHS),
     is_accessed_from_inner_scope_(false),
-    is_used_(false),
-    rewrite_(NULL) {
+    is_used_(false) {
   // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
 }

12
deps/v8/src/variables.h

@@ -187,21 +187,23 @@ class Variable: public ZoneObject {
   Scope* scope_;
   Handle<String> name_;
   Mode mode_;
-  bool is_valid_LHS_;
   Kind kind_;

   Variable* local_if_not_shadowed_;

-  // Usage info.
-  bool is_accessed_from_inner_scope_;  // set by variable resolver
-  bool is_used_;
-
   // Static type information
   StaticType type_;

   // Code generation.
   // rewrite_ is usually a Slot or a Property, but may be any expression.
   Expression* rewrite_;
+
+  // Valid as a LHS? (const and this are not valid LHS, for example)
+  bool is_valid_LHS_;
+
+  // Usage info.
+  bool is_accessed_from_inner_scope_;  // set by variable resolver
+  bool is_used_;
 };

2
deps/v8/src/version.cc

@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     0
-#define BUILD_NUMBER      10
+#define BUILD_NUMBER      12
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false

2
deps/v8/src/x64/assembler-x64-inl.h

@@ -425,7 +425,7 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
   // Use SIB with no index register only for base rsp or r12.  Otherwise we
   // would skip the SIB byte entirely.
   ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
-  buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+  buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
   rex_ |= index.high_bit() << 1 | base.high_bit();
   len_ = 2;
 }

60
deps/v8/src/x64/assembler-x64.cc

@@ -300,6 +300,34 @@ Operand::Operand(const Operand& operand, int32_t offset) {
   }
 }

+bool Operand::AddressUsesRegister(Register reg) const {
+  int code = reg.code();
+  ASSERT((buf_[0] & 0xC0) != 0xC0);  // Always a memory operand.
+  // Start with only low three bits of base register.  Initial decoding
+  // doesn't distinguish on the REX.B bit.
+  int base_code = buf_[0] & 0x07;
+  if (base_code == rsp.code()) {
+    // SIB byte present in buf_[1].
+    // Check the index register from the SIB byte + REX.X prefix.
+    int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
+    // Index code (including REX.X) of 0x04 (rsp) means no index register.
+    if (index_code != rsp.code() && index_code == code) return true;
+    // Add REX.B to get the full base register code.
+    base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
+    // A base register of 0x05 (rbp) with mod = 0 means no base register.
+    if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+    return code == base_code;
+  } else {
+    // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
+    // no base register.
+    if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+    base_code |= ((rex_ & 0x01) << 3);
+    return code == base_code;
+  }
+}
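Note: the decoding above can be exercised outside V8. A standalone re-implementation over raw ModR/M, SIB, and REX bytes, checked against [rbx + rcx*4 + disp8]; register codes follow the x64 encoding (this mirrors, but is not, V8's method):

#include <cstdio>

// Standalone copy of the decoding rules used above; rsp = 4, rbp = 5.
const int kRsp = 4;
const int kRbp = 5;

bool AddressUsesRegister(unsigned char rex, const unsigned char* buf,
                         int reg_code) {
  int base_code = buf[0] & 0x07;
  if (base_code == kRsp) {  // SIB byte present in buf[1].
    int index_code = ((buf[1] >> 3) & 0x07) | ((rex & 0x02) << 2);
    if (index_code != kRsp && index_code == reg_code) return true;
    base_code = (buf[1] & 0x07) | ((rex & 0x01) << 3);
    if (base_code == kRbp && ((buf[0] & 0xC0) == 0)) return false;
    return reg_code == base_code;
  }
  if (base_code == kRbp && ((buf[0] & 0xC0) == 0)) return false;
  base_code |= ((rex & 0x01) << 3);
  return reg_code == base_code;
}

int main() {
  // [rbx + rcx*4 + disp8]: modrm = 01 000 100 (0x44), sib = 10 001 011 (0x8B).
  unsigned char buf[] = {0x44, 0x8B};
  std::printf("%d %d %d\n",
              AddressUsesRegister(0, buf, 3),   // rbx (base)  -> 1
              AddressUsesRegister(0, buf, 1),   // rcx (index) -> 1
              AddressUsesRegister(0, buf, 0));  // rax         -> 0
}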
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.

@@ -1949,6 +1977,14 @@ void Assembler::push(Immediate value) {
 }

+void Assembler::push_imm32(int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x68);
+  emitl(imm32);
+}
+
 void Assembler::pushfq() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;

@@ -2641,6 +2677,30 @@ void Assembler::movq(Register dst, XMMRegister src) {
 }

+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_rex_64(src, dst);
+  emit(0x0F);
+  emit(0x7F);
+  emit_sse_operand(src, dst);
+}
+
+void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x6F);
+  emit_sse_operand(dst, src);
+}
+
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
   ASSERT(is_uint2(imm8));
   EnsureSpace ensure_space(this);
23
deps/v8/src/x64/assembler-x64.h

@ -153,6 +153,7 @@ struct Register {
// Unfortunately we can't make this private in a struct when initializing // Unfortunately we can't make this private in a struct when initializing
// by assignment. // by assignment.
int code_; int code_;
private: private:
static const int registerCodeByAllocationIndex[kNumAllocatableRegisters]; static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
static const int allocationIndexByRegisterCode[kNumRegisters]; static const int allocationIndexByRegisterCode[kNumRegisters];
@ -390,11 +391,15 @@ class Operand BASE_EMBEDDED {
// this must not overflow. // this must not overflow.
Operand(const Operand& base, int32_t offset); Operand(const Operand& base, int32_t offset);
// Checks whether either base or index register is the given register.
// Does not check the "reg" part of the Operand.
bool AddressUsesRegister(Register reg) const;
private: private:
byte rex_; byte rex_;
byte buf_[6]; byte buf_[6];
// The number of bytes in buf_. // The number of bytes of buf_ in use.
unsigned int len_; byte len_;
// Set the ModR/M byte without an encoded 'reg' register. The // Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation. // register is encoded later as part of the emit_operand operation.
@ -590,6 +595,9 @@ class Assembler : public Malloced {
void popfq(); void popfq();
void push(Immediate value); void push(Immediate value);
// Push a 32 bit integer, and guarantee that it is actually pushed as a
// 32 bit value, the normal push will optimize the 8 bit case.
void push_imm32(int32_t imm32);
void push(Register src); void push(Register src);
void push(const Operand& src); void push(const Operand& src);
@ -821,6 +829,10 @@ class Assembler : public Malloced {
arithmetic_op_32(0x23, dst, src); arithmetic_op_32(0x23, dst, src);
} }
void andl(Register dst, const Operand& src) {
arithmetic_op_32(0x23, dst, src);
}
void andb(Register dst, Immediate src) { void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src); immediate_arithmetic_op_8(0x4, dst, src);
} }
@ -1205,6 +1217,9 @@ class Assembler : public Malloced {
void movsd(XMMRegister dst, XMMRegister src); void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src); void movsd(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movss(XMMRegister dst, const Operand& src); void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src); void movss(const Operand& dst, XMMRegister src);
@ -1245,10 +1260,6 @@ class Assembler : public Malloced {
void emit_sse_operand(XMMRegister dst, Register src); void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src); void emit_sse_operand(Register dst, XMMRegister src);
// Use either movsd or movlpd.
// void movdbl(XMMRegister dst, const Operand& src);
// void movdbl(const Operand& dst, XMMRegister src);
// Debugging // Debugging
void Print(); void Print();

28
deps/v8/src/x64/builtins-x64.cc

@@ -561,7 +561,33 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  __ int3();
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Pass the deoptimization type to the runtime system.
+  __ Push(Smi::FromInt(static_cast<int>(type)));
+
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Get the full codegen state from the stack and untag it.
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+
+  // Switch on the state.
+  NearLabel not_no_registers, not_tos_rax;
+  __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+  __ j(not_equal, &not_no_registers);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));
+  __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+  __ j(not_equal, &not_tos_rax);
+  __ ret(2 * kPointerSize);  // Remove state, rax.
+
+  __ bind(&not_tos_rax);
+  __ Abort("no cases left");
 }

 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
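Note: the offsets used above imply the following stack picture at entry to the state dispatch, which makes the two ret sites easier to read (a reading aid inferred from the code, not code from the patch):

// Stack after LeaveInternalFrame, as implied by the offsets above:
//
//   rsp + 0 * kPointerSize : return address
//   rsp + 1 * kPointerSize : full-codegen state smi (NO_REGISTERS / TOS_REG)
//   rsp + 2 * kPointerSize : saved rax (top-of-stack), TOS_REG case only
//
// ret(1 * kPointerSize) pops the state slot; ret(2 * kPointerSize) pops
// the state and the saved rax slot after reloading rax from it.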
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {

16
deps/v8/src/x64/code-stubs-x64.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -91,7 +91,8 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
void FastNewContextStub::Generate(MacroAssembler* masm) { void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space. // Try to allocate the context in new space.
Label gc; Label gc;
__ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize, int length = slots_ + Context::MIN_CONTEXT_SLOTS;
__ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
rax, rbx, rcx, &gc, TAG_OBJECT); rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack. // Get the function from the stack.
@ -100,7 +101,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header. // Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex); __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
__ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(slots_)); __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots. // Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL. __ Set(rbx, 0); // Set to NULL.
@ -115,7 +116,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Initialize the rest of the slots to undefined. // Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex); __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) { for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ movq(Operand(rax, Context::SlotOffset(i)), rbx); __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
} }
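Note: slots_ now counts only the function's own context-allocated variables; the stub adds the fixed header slots back itself, matching the "num_heap_slots() - MIN_CONTEXT_SLOTS" callers in codegen-x64.cc and full-codegen-x64.cc below. A model of the size computation, with assumed constants (MIN_CONTEXT_SLOTS and the header size vary with V8 version):

#include <cstdio>

// Model of the stub's new allocation-size computation; the constants are
// illustrative assumptions, not authoritative V8 values.
const int kMinContextSlots = 5;
const int kPointerSize = 8;
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

int ContextAllocationSize(int slots) {
  int length = slots + kMinContextSlots;  // header slots added back
  return length * kPointerSize + kFixedArrayHeaderSize;
}

int main() {
  // A function with 3 context-allocated locals:
  std::printf("%d bytes\n", ContextAllocationSize(3));  // (3+5)*8 + 16 = 80
}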
@@ -2773,8 +2774,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
                               bool do_gc,
-                              bool always_allocate_scope,
-                              int /* alignment_skew */) {
+                              bool always_allocate_scope) {
   // rax: result parameter for PerformGC, if any.
   // rbx: pointer to C function (C callee-saved).
   // rbp: frame pointer (restored after C call).

@@ -2867,7 +2867,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   __ j(zero, &failure_returned);

   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles_);
   __ ret(0);

   // Handling of failure.

@@ -2976,7 +2976,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
 #else
   int arg_stack_space = 0;
 #endif
-  __ EnterExitFrame(arg_stack_space);
+  __ EnterExitFrame(arg_stack_space, save_doubles_);

   // rax: Holds the context at this point, but should not be used.
   // On entry to code generated by GenerateCore, it must hold

12
deps/v8/src/x64/codegen-x64.cc

@@ -206,7 +206,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
     frame_->AllocateStackSlots();

     // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots();
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
     if (heap_slots > 0) {
       Comment cmnt(masm_, "[ allocate local context");
       // Allocate local context.

@@ -7235,19 +7235,13 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
         return;

       } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Call the runtime to look up the context holding the named
+        // Call the runtime to delete from the context holding the named
         // variable.  Sync the virtual frame eagerly so we can push the
         // arguments directly into place.
         frame_->SyncRange(0, frame_->element_count() - 1);
         frame_->EmitPush(rsi);
         frame_->EmitPush(variable->name());
-        Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
-        ASSERT(context.is_register());
-        frame_->EmitPush(context.reg());
-        context.Unuse();
-        frame_->EmitPush(variable->name());
-        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                              CALL_FUNCTION, 2);
+        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
         frame_->Push(&answer);
         return;
       }

442
deps/v8/src/x64/deoptimizer-x64.cc

@@ -41,18 +41,82 @@ namespace internal {
 int Deoptimizer::table_entry_size_ = 10;

 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  // UNIMPLEMENTED, for now just return.
-  return;
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    SafepointEntry safepoint_entry = table.GetEntry(i);
+    int deoptimization_index = safepoint_entry.deoptimization_index();
+    int gap_code_size = safepoint_entry.gap_code_size();
+#ifdef DEBUG
+    // Destroy the code which is not supposed to run again.
+    unsigned instructions = pc_offset - last_pc_offset;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (unsigned i = 0; i < instructions; i++) {
+      destroyer.masm()->int3();
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      CodePatcher patcher(
+          code->instruction_start() + pc_offset + gap_code_size,
+          Assembler::kCallInstructionLength);
+      patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+                           RelocInfo::NONE);
+      last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+    }
+  }
+#ifdef DEBUG
+  // Destroy the code which is not supposed to run again.
+  CHECK(code->safepoint_table_start() >= last_pc_offset);
+  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (unsigned i = 0; i < instructions; i++) {
+    destroyer.masm()->int3();
+  }
+#endif
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;
+
+  // Set the code for the function to the non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+  }
 }
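Note: after the loop above, the invalidated optimized code has roughly this shape (a sketch inferred from the code, not from the patch itself):

// Sketch of the patched code:
//
//   ... code up to safepoint 0 ...   (filled with int3 in debug builds)
//   <gap code> call <lazy deopt entry for deoptimization index 0>
//   ... code up to safepoint 1 ...   (filled with int3 in debug builds)
//   <gap code> call <lazy deopt entry for deoptimization index 1>
//   ...
//
// Each patched call targets an entry emitted by TableEntryGenerator (see
// GeneratePrologue below), which pushes its index and jumps to the common
// deoptimization routine.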
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+                                      Code* check_code,
                                       Code* replacement_code) {
   UNIMPLEMENTED();
 }

-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+                                       Code* check_code,
+                                       Code* replacement_code) {
   UNIMPLEMENTED();
 }
@@ -64,20 +128,382 @@ void Deoptimizer::DoComputeOsrOutputFrame() {

 void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
                                  int frame_index) {
-  UNIMPLEMENTED();
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  intptr_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be gotten from the function so long as we don't
+  // optimize functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function->context());
+  // The context for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) output_frame->SetRegister(rsi.code(), value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR "; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR "; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Code* continuation = (bailout_type_ == EAGER)
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<intptr_t>(continuation->entry()));
+  }
+
+  if (output_count_ - 1 == frame_index) iterator->Done();
 }
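Note: the slots are written from highest address to lowest, so the reconstructed output frame reads as follows (a reading aid inferred from the write order above, not code from the patch):

// Slot order of one output frame, highest address first:
//
//   incoming parameters        -- formal_parameter_count + 1 (receiver)
//   caller's pc
//   caller's fp                <- fp_value
//   context
//   function
//   locals / expression stack  -- "height" slots, ending at top_address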
+#define __ masm()->

 void Deoptimizer::EntryGenerator::Generate() {
-  // UNIMPLEMENTED, for now just return.
-  return;
+  GeneratePrologue();
+  CpuFeatures::Scope scope(SSE2);
+
+  // Save all general purpose registers before messing with them.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  const int kDoubleRegsSize = kDoubleSize *
+      XMMRegister::kNumAllocatableRegisters;
+  __ subq(rsp, Immediate(kDoubleRegsSize));
+
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ movsd(Operand(rsp, offset), xmm_reg);
+  }
+
+  // We push all registers onto the stack, even though we do not need
+  // to restore all later.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    Register r = Register::toRegister(i);
+    __ push(r);
+  }
+
+  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+                                      kDoubleRegsSize;
+
+  // When calling new_deoptimizer_function we need to pass the last argument
+  // on the stack on Windows and in r8 on Linux.  The remaining arguments are
+  // all passed in registers (different ones on Linux and Windows, though).
+#ifdef _WIN64
+  Register arg4 = r9;
+  Register arg3 = r8;
+  Register arg2 = rdx;
+  Register arg1 = rcx;
+#else
+  Register arg4 = rcx;
+  Register arg3 = rdx;
+  Register arg2 = rsi;
+  Register arg1 = rdi;
+#endif
+
+  // We use this to keep the value of the fifth argument temporarily.
+  // Unfortunately we can't store it directly in r8 (used for passing
+  // this on Linux), since it is another parameter passing register on
+  // Windows.
+  Register arg5 = r11;
+
+  // Get the bailout id from the stack.
+  __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object if possible
+  // and compute the fp-to-sp delta in register arg5.
+  if (type() == EAGER) {
+    __ Set(arg4, 0);
+    __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+  } else {
+    __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+    __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
+
+  __ subq(arg5, rbp);
+  __ neg(arg5);
+
+  // Allocate a new deoptimizer object.
+  __ PrepareCallCFunction(5);
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(arg1, rax);
+  __ movq(arg2, Immediate(type()));
+  // Args 3 and 4 are already in the right registers.
+
+  // On Windows put the argument on the stack (PrepareCallCFunction has
+  // created space for this).  On Linux pass the argument in r8.
+#ifdef _WIN64
+  __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+#else
+  __ movq(r8, arg5);
+#endif
+
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+  // Preserve deoptimizer object in register rax and get the input
+  // frame descriptor pointer.
+  __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+
+  // Fill in the input registers.
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    __ pop(Operand(rbx, offset));
+  }
+
+  // Fill in the double input registers.
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    __ pop(Operand(rbx, dst_offset));
+  }
+
+  // Remove the bailout id from the stack.
+  if (type() == EAGER) {
+    __ addq(rsp, Immediate(kPointerSize));
+  } else {
+    __ addq(rsp, Immediate(2 * kPointerSize));
+  }
+
+  // Compute a pointer to the unwinding limit in register rcx; that is
+  // the first stack slot not part of the input frame.
+  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+  __ addq(rcx, rsp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(Operand(rdx, 0));
+  __ addq(rdx, Immediate(sizeof(intptr_t)));
+  __ cmpq(rcx, rsp);
+  __ j(not_equal, &pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(rax);
+  __ PrepareCallCFunction(1);
+  __ movq(arg1, rax);
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+  __ pop(rax);
+
+  // Replace the current frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: rax = current FrameDescription**, rdx = one past the
+  // last FrameDescription**.
+  __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
+  __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+  __ lea(rdx, Operand(rax, rdx, times_8, 0));
+  __ bind(&outer_push_loop);
+  // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
+  __ movq(rbx, Operand(rax, 0));
+  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ subq(rcx, Immediate(sizeof(intptr_t)));
+  __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+  __ testq(rcx, rcx);
+  __ j(not_zero, &inner_push_loop);
+  __ addq(rax, Immediate(kPointerSize));
+  __ cmpq(rax, rdx);
+  __ j(below, &outer_push_loop);
+
+  // In case of OSR, we have to restore the XMM registers.
+  if (type() == OSR) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ movsd(xmm_reg, Operand(rbx, src_offset));
+    }
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ push(Operand(rbx, FrameDescription::state_offset()));
+  }
+  __ push(Operand(rbx, FrameDescription::pc_offset()));
+  __ push(Operand(rbx, FrameDescription::continuation_offset()));
+
+  // Push the registers from the last output frame.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    __ push(Operand(rbx, offset));
+  }
+
+  // Restore the registers from the stack.
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    Register r = Register::toRegister(i);
+    // Do not restore rsp; simply pop the value into the next register
+    // and overwrite this afterwards.
+    if (r.is(rsp)) {
+      ASSERT(i > 0);
+      r = Register::toRegister(i - 1);
+    }
+    __ pop(r);
+  }
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ movq(r13, roots_address);
+
+  __ movq(kSmiConstantRegister,
+          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+          RelocInfo::NONE);
+
+  // Return to the continuation point.
+  __ ret(0);
 }
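Note: for reference, the five arguments assembled for the new_deoptimizer_function call above are (as set up in the code; Win64 passes the fifth on the stack, System V uses r8):

// arg1 - JSFunction being deoptimized (read from the frame's function slot)
// arg2 - bailout type (EAGER, LAZY, or OSR)
// arg3 - bailout id, taken from the deopt table entry's push
// arg4 - address within the code object (0 for EAGER)
// arg5 - fp-to-sp delta of the input frame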
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-  UNIMPLEMENTED();
+  // Create a sequence of deoptimization entries.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    __ push_imm32(i);
+    __ jmp(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
 }
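Note: the push_imm32/jmp pair is what keeps every entry exactly table_entry_size_ (10) bytes, so entry i can be located by pure arithmetic; a plain push would shrink small indices to the 2-byte "push imm8" form and break the spacing that the ASSERT checks. A model of the geometry, assuming the usual x64 encodings:

#include <cassert>

// Model of the fixed-size deopt table geometry (constants mirror the code
// above; encodings assumed: push imm32 = 5 bytes, jmp rel32 = 5 bytes).
const int kPushImm32Size = 5;
const int kJmpRel32Size = 5;
const int kTableEntrySize = kPushImm32Size + kJmpRel32Size;  // == 10

int EntryOffset(int index) { return index * kTableEntrySize; }

int main() {
  assert(kTableEntrySize == 10);  // matches Deoptimizer::table_entry_size_
  assert(EntryOffset(3) == 30);   // entry i is found by pure arithmetic
  return 0;
}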
+#undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_X64

8
deps/v8/src/x64/disasm-x64.cc

@@ -1025,11 +1025,19 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
                    rex_w() ? 'q' : 'd',
                    NameOfXMMRegister(regop));
     current += PrintRightOperand(current);
+  } else if (opcode == 0x6F) {
+    AppendToBuffer("movdqa %s,",
+                   NameOfXMMRegister(regop));
+    current += PrintRightOperand(current);
   } else if (opcode == 0x7E) {
     AppendToBuffer("mov%c ",
                    rex_w() ? 'q' : 'd');
     current += PrintRightOperand(current);
     AppendToBuffer(", %s", NameOfXMMRegister(regop));
+  } else if (opcode == 0x7F) {
+    AppendToBuffer("movdqa ");
+    current += PrintRightOperand(current);
+    AppendToBuffer(", %s", NameOfXMMRegister(regop));
   } else {
     const char* mnemonic = "?";
     if (opcode == 0x57) {

15
deps/v8/src/x64/full-codegen-x64.cc

@@ -88,7 +88,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
   bool function_in_register = true;

   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots();
+  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.

@@ -710,6 +710,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
+    clause->body_target()->entry_label()->Unuse();
+
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
       default_clause = clause;

@@ -3006,19 +3008,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         if (prop != NULL) {
          VisitForStackValue(prop->obj());
           VisitForStackValue(prop->key());
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);

         } else if (var->is_global()) {
           __ push(GlobalObjectOperand());
           __ Push(var->name());
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);

         } else {
-          // Non-global variable.  Call the runtime to look up the context
-          // where the variable was introduced.
+          // Non-global variable.  Call the runtime to delete from the
+          // context where the variable was introduced.
           __ push(context_register());
           __ Push(var->name());
-          __ CallRuntime(Runtime::kLookupContext, 2);
-          __ push(rax);
-          __ Push(var->name());
+          __ CallRuntime(Runtime::kDeleteContextSlot, 2);
         }
-        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(rax);
       }
       break;

5
deps/v8/src/x64/ic-x64.cc

@@ -397,7 +397,7 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -405,7 +405,8 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// -----------------------------------
Label miss;
StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
support_wrappers);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}

662
deps/v8/src/x64/lithium-codegen-x64.cc

@@ -37,157 +37,6 @@ namespace v8 {
namespace internal {
class LGapNode: public ZoneObject {
public:
explicit LGapNode(LOperand* operand)
: operand_(operand), resolved_(false), visited_id_(-1) { }
LOperand* operand() const { return operand_; }
bool IsResolved() const { return !IsAssigned() || resolved_; }
void MarkResolved() {
ASSERT(!IsResolved());
resolved_ = true;
}
int visited_id() const { return visited_id_; }
void set_visited_id(int id) {
ASSERT(id > visited_id_);
visited_id_ = id;
}
bool IsAssigned() const { return assigned_from_.is_set(); }
LGapNode* assigned_from() const { return assigned_from_.get(); }
void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
private:
LOperand* operand_;
SetOncePointer<LGapNode> assigned_from_;
bool resolved_;
int visited_id_;
};
LGapResolver::LGapResolver()
: nodes_(32),
identified_cycles_(4),
result_(16),
next_visited_id_(0) {
}
const ZoneList<LMoveOperands>* LGapResolver::Resolve(
const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand) {
nodes_.Rewind(0);
identified_cycles_.Rewind(0);
result_.Rewind(0);
next_visited_id_ = 0;
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) RegisterMove(move);
}
for (int i = 0; i < identified_cycles_.length(); ++i) {
ResolveCycle(identified_cycles_[i], marker_operand);
}
int unresolved_nodes;
do {
unresolved_nodes = 0;
for (int j = 0; j < nodes_.length(); j++) {
LGapNode* node = nodes_[j];
if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
AddResultMove(node->assigned_from(), node);
node->MarkResolved();
}
if (!node->IsResolved()) ++unresolved_nodes;
}
} while (unresolved_nodes > 0);
return &result_;
}
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
AddResultMove(from->operand(), to->operand());
}
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
result_.Add(LMoveOperands(from, to));
}
void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
ZoneList<LOperand*> cycle_operands(8);
cycle_operands.Add(marker_operand);
LGapNode* cur = start;
do {
cur->MarkResolved();
cycle_operands.Add(cur->operand());
cur = cur->assigned_from();
} while (cur != start);
cycle_operands.Add(marker_operand);
for (int i = cycle_operands.length() - 1; i > 0; --i) {
LOperand* from = cycle_operands[i];
LOperand* to = cycle_operands[i - 1];
AddResultMove(from, to);
}
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
ASSERT(a != b);
LGapNode* cur = a;
while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
cur->set_visited_id(visited_id);
cur = cur->assigned_from();
}
return cur == b;
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
ASSERT(a != b);
return CanReach(a, b, next_visited_id_++);
}
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.source(), move.destination());
} else {
LGapNode* from = LookupNode(move.source());
LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
}
ASSERT(!to->IsAssigned());
if (CanReach(from, to)) {
// This introduces a cycle. Save.
identified_cycles_.Add(from);
}
to->set_assigned_from(from);
}
}
LGapNode* LGapResolver::LookupNode(LOperand* operand) {
for (int i = 0; i < nodes_.length(); ++i) {
if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
}
// No node found => create a new one.
LGapNode* result = new LGapNode(operand);
nodes_.Add(result);
return result;
}
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -339,6 +188,10 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
// Ensure that patching a deoptimization point won't overwrite the table.
for (int i = 0; i < Assembler::kCallInstructionLength; i++) {
masm()->int3();
}
safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
}
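The int3 padding exists because a lazy deoptimization later patches a call sequence over the last deopt point in the code; padding by one call-instruction length guarantees the patch can only overwrite int3 bytes, never the safepoint table that follows. A sketch of the size argument (the constant's value here is illustrative):

// A patch rewrites exactly kCallInstructionLength bytes at the patch
// site; emitting the same number of int3 bytes before the table keeps
// the rewrite inside padding.
const int kCallInstructionLengthSketch = 13;  // illustrative for x64
// safe  <=>  table_start >= patch_site + kCallInstructionLengthSketch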
@@ -567,7 +420,24 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
Abort("Unimplemented: %s", "Deoptimiz");
RegisterEnvironmentForDeoptimization(environment);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
ASSERT(entry != NULL);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
if (cc == no_condition) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
NearLabel done;
__ j(NegateCondition(cc), &done);
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&done);
}
}
@@ -629,37 +499,40 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
int deoptimization_index) {
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
deoptimization_index);
kind, arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
if (kind & Safepoint::kWithRegisters) {
// Register rsi always contains a pointer to the context.
safepoint.DefinePointerRegister(rsi);
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
int deoptimization_index) {
RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
const ZoneList<LOperand*>* operands = pointers->operands();
Safepoint safepoint =
safepoints_.DefineSafepointWithRegisters(
masm(), arguments, deoptimization_index);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index());
} else if (pointer->IsRegister()) {
safepoint.DefinePointerRegister(ToRegister(pointer));
}
}
// Register rsi always contains a pointer to the context.
safepoint.DefinePointerRegister(rsi);
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
deoptimization_index);
}
@@ -682,86 +555,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
// xmm0 must always be a scratch register.
XMMRegister xmm_scratch = xmm0;
LUnallocated marker_operand(LUnallocated::NONE);
Register cpu_scratch = kScratchRegister;
const ZoneList<LMoveOperands>* moves =
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.source();
LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(xmm_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
if (from->IsConstantOperand()) {
LConstantOperand* constant_from = LConstantOperand::cast(from);
if (to->IsRegister()) {
if (IsInteger32Constant(constant_from)) {
__ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
} else {
__ Move(ToRegister(to), ToHandle(constant_from));
}
} else {
if (IsInteger32Constant(constant_from)) {
__ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
} else {
__ Move(ToOperand(to), ToHandle(constant_from));
}
}
} else if (from == &marker_operand) {
if (to->IsRegister()) {
__ movq(ToRegister(to), cpu_scratch);
} else if (to->IsStackSlot()) {
__ movq(ToOperand(to), cpu_scratch);
} else if (to->IsDoubleRegister()) {
__ movsd(ToDoubleRegister(to), xmm_scratch);
} else {
ASSERT(to->IsDoubleStackSlot());
__ movsd(ToOperand(to), xmm_scratch);
}
} else if (to == &marker_operand) {
if (from->IsRegister()) {
__ movq(cpu_scratch, ToRegister(from));
} else if (from->IsStackSlot()) {
__ movq(cpu_scratch, ToOperand(from));
} else if (from->IsDoubleRegister()) {
__ movsd(xmm_scratch, ToDoubleRegister(from));
} else {
ASSERT(from->IsDoubleStackSlot());
__ movsd(xmm_scratch, ToOperand(from));
}
} else if (from->IsRegister()) {
if (to->IsRegister()) {
__ movq(ToRegister(to), ToRegister(from));
} else {
__ movq(ToOperand(to), ToRegister(from));
}
} else if (to->IsRegister()) {
__ movq(ToRegister(to), ToOperand(from));
} else if (from->IsStackSlot()) {
ASSERT(to->IsStackSlot());
__ push(rax);
__ movq(rax, ToOperand(from));
__ movq(ToOperand(to), rax);
__ pop(rax);
} else if (from->IsDoubleRegister()) {
ASSERT(to->IsDoubleStackSlot());
__ movsd(ToOperand(to), ToDoubleRegister(from));
} else if (to->IsDoubleRegister()) {
ASSERT(from->IsDoubleStackSlot());
__ movsd(ToDoubleRegister(to), ToOperand(from));
} else {
ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
__ movsd(xmm_scratch, ToOperand(from));
__ movsd(ToOperand(to), xmm_scratch);
}
}
resolver_.Resolve(move);
}
@@ -820,7 +614,22 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
Abort("Unimplemented: %s", "DoSubI");
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
__ subl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
__ subl(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
}
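DoSubI pairs a 32-bit subl with a conditional deopt on the overflow flag, keeping integer arithmetic on the fast path until it actually overflows. A hedged sketch of the same check in portable C++ (GCC/Clang builtin):

#include <cstdint>

// Where the generated code would jump to a deoptimization entry,
// this sketch returns false instead.
bool FastSubSketch(int32_t left, int32_t right, int32_t* result) {
  return !__builtin_sub_overflow(left, right, result);  // subl + jo
}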
@@ -1146,7 +955,18 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
Abort("Unimplemented: %s", "DoCmpJSObjectEq");
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
NearLabel different, done;
__ cmpq(left, right);
__ j(not_equal, &different);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&different);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1162,7 +982,45 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
void LCodeGen::DoIsNull(LIsNull* instr) {
Abort("Unimplemented: %s", "DoIsNull");
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
// If the expression is known to be a smi, then it's
// definitely not null. Materialize false.
// Consider adding other type and representation tests too.
if (instr->hydrogen()->value()->type().IsSmi()) {
__ LoadRoot(result, Heap::kFalseValueRootIndex);
return;
}
__ CompareRoot(reg, Heap::kNullValueRootIndex);
if (instr->is_strict()) {
__ movl(result, Immediate(Heap::kTrueValueRootIndex));
NearLabel load;
__ j(equal, &load);
__ movl(result, Immediate(Heap::kFalseValueRootIndex));
__ bind(&load);
__ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
} else {
NearLabel true_value, false_value, done;
__ j(equal, &true_value);
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, &true_value);
__ JumpIfSmi(reg, &false_value);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = result;
__ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value);
__ bind(&false_value);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&true_value);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
}
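In the strict case above, the code branches only to pick a roots-array index in the result register, then performs a single indexed load through kRootRegister, instead of duplicating two LoadRoot sites. The same select-then-index idea in sketch form (indices illustrative, not V8's real root list):

static void* roots_sketch[16];
const int kTrueIndexSketch = 7;
const int kFalseIndexSketch = 8;

void* NullBooleanSketch(bool is_null) {
  int index = is_null ? kTrueIndexSketch : kFalseIndexSketch;  // movl + j(equal)
  return roots_sketch[index];  // one load via the root register
}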
@@ -1204,56 +1062,77 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object) {
ASSERT(!input.is(temp1));
ASSERT(!input.is(temp2));
ASSERT(!temp1.is(temp2));
ASSERT(!input.is(kScratchRegister));
__ JumpIfSmi(input, is_not_object);
__ Cmp(input, Factory::null_value());
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
__ movq(temp1, FieldOperand(input, HeapObject::kMapOffset));
__ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(temp1, Map::kBitFieldOffset),
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, is_not_object);
__ movzxbl(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
__ cmpb(temp2, Immediate(FIRST_JS_OBJECT_TYPE));
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, is_not_object);
__ cmpb(temp2, Immediate(LAST_JS_OBJECT_TYPE));
__ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
return below_equal;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Abort("Unimplemented: %s", "DoIsObject");
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
__ j(true_cond, &is_true);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ jmp(&done);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
Condition true_cond = EmitIsObject(reg, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
Abort("Unimplemented: %s", "DoIsSmi");
LOperand* input_operand = instr->InputAt(0);
Register result = ToRegister(instr->result());
if (input_operand->IsRegister()) {
Register input = ToRegister(input_operand);
__ CheckSmiToIndicator(result, input);
} else {
Operand input = ToOperand(instr->InputAt(0));
__ CheckSmiToIndicator(result, input);
}
// result is zero if input is a smi, and one otherwise.
ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
__ movq(result, Operand(kRootRegister, result, times_pointer_size,
Heap::kTrueValueRootIndex * kPointerSize));
}
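DoIsSmi leans on the roots array being laid out with the false value immediately after the true value, so a 0/1 smi indicator can index straight to the right boolean with no branch. A sketch of the invariant it relies on (indices illustrative):

static void* roots_table_sketch[16];
const int kTrueIdxSketch = 7;
const int kFalseIdxSketch = kTrueIdxSketch + 1;  // the ASSERT above checks this

void* BoolFromIndicatorSketch(int indicator) {  // 0 if smi, 1 otherwise
  return roots_table_sketch[kTrueIdxSketch + indicator];
}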
@@ -1386,7 +1265,25 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
Abort("Unimplemented: %s", "DoClassOfTest");
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
NearLabel done;
Label is_true, is_false;
EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
__ j(not_equal, &is_false);
__ bind(&is_true);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ jmp(&done);
__ bind(&is_false);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ bind(&done);
}
@@ -1408,7 +1305,12 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Abort("Unimplemented: %s", "DoCmpMapAndBranch");
Register reg = ToRegister(instr->InputAt(0));
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
EmitBranch(true_block, false_block, equal);
}
@@ -1493,12 +1395,32 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
Abort("Unimplemented: %s", "DoLoadGlobal");
Register result = ToRegister(instr->result());
if (result.is(rax)) {
__ load_rax(instr->hydrogen()->cell().location(),
RelocInfo::GLOBAL_PROPERTY_CELL);
} else {
__ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(result, Operand(result, 0));
}
if (instr->hydrogen()->check_hole_value()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
}
}
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Abort("Unimplemented: %s", "DoStoreGlobal");
Register value = ToRegister(instr->InputAt(0));
if (value.is(rax)) {
__ store_rax(instr->hydrogen()->cell().location(),
RelocInfo::GLOBAL_PROPERTY_CELL);
} else {
__ movq(kScratchRegister,
Handle<Object>::cast(instr->hydrogen()->cell()),
RelocInfo::GLOBAL_PROPERTY_CELL);
__ movq(Operand(kScratchRegister, 0), value);
}
}
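Both global accessors go through a global property cell: the code embeds the cell's address with GLOBAL_PROPERTY_CELL relocation and dereferences it, with an optional hole check that deopts when the global was deleted. A sketch of the cell indirection (simplified types, not V8's real Cell layout):

struct CellSketch { void* value; };

void* LoadGlobalSketch(CellSketch* cell, void* the_hole, bool check_hole) {
  void* v = cell->value;  // movq result, [cell]
  if (check_hole && v == the_hole) {
    // check_hole_value(): a deleted global leaves the hole behind,
    // which DoLoadGlobal turns into a deoptimization.
  }
  return v;
}

void StoreGlobalSketch(CellSketch* cell, void* value) {
  cell->value = value;    // movq [cell], value
}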
@@ -1508,7 +1430,14 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Abort("Unimplemented: %s", "DoLoadNamedField");
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
} else {
__ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
}
}
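Named-field loads have two shapes: fast properties live directly inside the object at a fixed offset, while overflow properties live one hop away in a separate properties array. A sketch of that layout decision (slot layout illustrative; slot 1 stands in for kPropertiesOffset):

struct ObjSketch { void* slots[8]; };

void* LoadFieldSketch(ObjSketch* obj, int index, bool in_object) {
  if (in_object) return obj->slots[index];  // one load
  ObjSketch* props = static_cast<ObjSketch*>(obj->slots[1]);
  return props->slots[index];               // two loads
}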
@@ -1558,17 +1487,39 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
Abort("Unimplemented: %s", "DoPushArgument");
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
LConstantOperand* const_op = LConstantOperand::cast(argument);
Handle<Object> literal = chunk_->LookupLiteral(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ push(Immediate(static_cast<int32_t>(literal->Number())));
} else if (r.IsDouble()) {
Abort("unsupported double immediate");
} else {
ASSERT(r.IsTagged());
__ Push(literal);
}
} else if (argument->IsRegister()) {
__ push(ToRegister(argument));
} else {
ASSERT(!argument->IsDoubleRegister());
__ push(ToOperand(argument));
}
}
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Abort("Unimplemented: %s", "DoGlobalObject");
Register result = ToRegister(instr->result());
__ movq(result, GlobalObjectOperand());
}
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
Abort("Unimplemented: %s", "DoGlobalReceiver");
Register result = ToRegister(instr->result());
__ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ movq(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
}
@@ -1665,7 +1616,12 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
Abort("Unimplemented: %s", "DoCallNew");
ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Set(rax, instr->arity());
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -1675,7 +1631,32 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Abort("Unimplemented: %s", "DoStoreNamedField");
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
int offset = instr->offset();
if (!instr->transition().is_null()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
}
// Do the store.
if (instr->is_in_object()) {
__ movq(FieldOperand(object, offset), value);
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWrite(object, offset, value, temp);
}
} else {
Register temp = ToRegister(instr->TempAt(0));
__ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ movq(FieldOperand(temp, offset), value);
if (instr->needs_write_barrier()) {
// Update the write barrier for the properties array.
// object is used as a scratch register.
__ RecordWrite(temp, offset, value, object);
}
}
}
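Stores emit a write barrier only when the written value could be a heap pointer the collector must trace; the barrier records the written slot so cross-generation references stay visible to the GC. A sketch of the guard (card-marking details elided; RememberSlotSketch is a hypothetical stand-in for __ RecordWrite):

struct FieldsSketch { void* fields[8]; };

void RememberSlotSketch(FieldsSketch* obj, int offset) { /* mark card */ }

void StoreFieldSketch(FieldsSketch* obj, int offset, void* value,
                      bool needs_barrier) {
  obj->fields[offset] = value;                    // the movq
  if (needs_barrier) RememberSlotSketch(obj, offset);
}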
@@ -1700,27 +1681,63 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
Abort("Unimplemented: %s", "DoInteger32ToDouble");
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
__ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
Abort("Unimplemented: %s", "DoNumberTagI");
}
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Abort("Unimplemented: %s", "DoDeferredNumberTagI");
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
__ Integer32ToSmi(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Abort("Unimplemented: %s", "DoNumberTagD");
class DeferredNumberTagD: public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
private:
LNumberTagD* instr_;
};
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->TempAt(0));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
Abort("Unimplemented: %s", "DoDeferredNumberTagD");
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
__ Move(reg, Smi::FromInt(0));
__ PushSafepointRegisters();
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Ensure that value in rax survives popping registers.
__ movq(kScratchRegister, rax);
__ PopSafepointRegisters();
__ movq(reg, kScratchRegister);
}
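DoNumberTagD shows the standard deferred-code split: try an inline allocation, and fall back to a runtime call emitted out of line so the fast path stays straight-line. The control shape in sketch form (both allocators are illustrative stand-ins):

#include <cstdlib>

static void* AllocateFastSketch() { return nullptr; }           // "bump failed"
static void* AllocateRuntimeSketch() { return std::malloc(16); }

void* TagDoubleSketch(bool inline_new) {
  void* box = inline_new ? AllocateFastSketch() : nullptr;
  if (box == nullptr) box = AllocateRuntimeSketch();  // deferred->entry()
  return box;                                         // deferred->exit()
}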
@@ -1737,7 +1754,34 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
LEnvironment* env) {
Abort("Unimplemented: %s", "EmitNumberUntagD");
NearLabel load_smi, heap_number, done;
// Smi check.
__ JumpIfSmi(input_reg, &load_smi);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(equal, &heap_number);
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, env);
// Convert undefined to NaN. Compute NaN as 0/0.
__ xorpd(result_reg, result_reg);
__ divsd(result_reg, result_reg);
__ jmp(&done);
// Heap number to XMM conversion.
__ bind(&heap_number);
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ jmp(&done);
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
__ cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
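The xorpd/divsd pair above computes NaN without loading a constant: IEEE 754 defines 0.0 / 0.0 as a quiet NaN. The same trick in C++:

#include <cmath>

double NanFromZeroOverZero() {
  double zero = 0.0;   // xorpd result_reg, result_reg
  return zero / zero;  // divsd result_reg, result_reg
}
// std::isnan(NanFromZeroOverZero()) holds on IEEE-754 hardware.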
@@ -1762,7 +1806,13 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Abort("Unimplemented: %s", "DoCheckSmi");
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Condition cc = masm()->CheckSmi(ToRegister(input));
if (instr->condition() != equal) {
cc = NegateCondition(cc);
}
DeoptimizeIf(cc, instr->environment());
}
@@ -1772,12 +1822,20 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Abort("Unimplemented: %s", "DoCheckFunction");
ASSERT(instr->InputAt(0)->IsRegister());
Register reg = ToRegister(instr->InputAt(0));
__ Cmp(reg, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
Abort("Unimplemented: %s", "DoCheckMap");
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
instr->hydrogen()->map());
DeoptimizeIf(not_equal, instr->environment());
}
@@ -1787,7 +1845,29 @@ void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
Register reg = ToRegister(instr->TempAt(0));
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
Handle<Map>(current_prototype->map()));
DeoptimizeIf(not_equal, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
Handle<Map>(current_prototype->map()));
DeoptimizeIf(not_equal, instr->environment());
}
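DoCheckPrototypeMaps walks from the first prototype to the holder, comparing each object's map against the map recorded at compile time; any mismatch means the chain was mutated and the optimized code deopts. The walk in sketch form:

#include <vector>

struct ProtoSketch { const void* map; const ProtoSketch* next; };

bool ChainStillValidSketch(const ProtoSketch* p,
                           const std::vector<const void*>& expected_maps) {
  for (const void* expected : expected_maps) {
    if (p == nullptr || p->map != expected) return false;  // DeoptimizeIf
    p = p->next;
  }
  return true;
}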

55
deps/v8/src/x64/lithium-codegen-x64.h

@@ -34,37 +34,15 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver();
const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand);
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start, LOperand* marker_operand);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
int next_visited_id_;
};
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,10 +58,24 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1) {
osr_pc_offset_(-1),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -95,7 +87,6 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
@@ -129,7 +120,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -190,13 +180,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
@@ -209,6 +192,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
@@ -231,8 +218,6 @@ class LCodeGen BASE_EMBEDDED {
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object);

320
deps/v8/src/x64/lithium-gap-resolver-x64.cc

@@ -0,0 +1,320 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_X64)
#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
PerformMove(i);
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move);
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph. We use operand swaps to resolve cycles,
// which means that a call to PerformMove could change any source operand
// in the move graph.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack-allocated local. Recursion may allow
// multiple moves to be pending.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does
// not miss any). Assume there is a non-blocking move with source A
// and this move is blocked on source B and there is a swap of A and
// B. Then A and B must be involved in the same cycle (or they would
// not be swapped). Since this move's destination is B and there is
// only a single incoming edge to an operand, this move must also be
// involved in the same cycle. In that case, the blocking move will
// be created but will be "pending" when we return from PerformMove.
PerformMove(i);
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// This move's source may have changed due to swaps to resolve cycles and
// so it may now be the last move in the cycle. If so remove it.
if (moves_[index].source()->Equals(destination)) {
moves_[index].Eliminate();
return;
}
// The move may be blocked on a (at most one) pending move, in which case
// we have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
EmitSwap(index);
return;
}
}
// This move is not blocked.
EmitMove(index);
}
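The algorithm is easiest to see on a two-element cycle. Given the parallel move {s0 -> s1, s1 -> s0}, PerformMove(0) marks move 0 pending, recurses into move 1, finds move 1 blocked on the pending move, and breaks the cycle with one swap. A compact, runnable model of the same worklist logic, with plain integer slots standing in for LOperands (it omits constant-source handling and other details of the real resolver):

#include <cstdio>
#include <utility>
#include <vector>

struct MoveSketch { int src; int dst; bool pending = false; bool done = false; };

struct ToyResolver {
  std::vector<MoveSketch> moves;
  std::vector<int> slots;

  void EmitMove(int i) { slots[moves[i].dst] = slots[moves[i].src]; moves[i].done = true; }

  void EmitSwap(int i) {
    std::swap(slots[moves[i].src], slots[moves[i].dst]);
    moves[i].done = true;
    // As in EmitSwap above: redirect the sources of remaining moves.
    for (MoveSketch& o : moves) {
      if (o.done) continue;
      if (o.src == moves[i].src) o.src = moves[i].dst;
      else if (o.src == moves[i].dst) o.src = moves[i].src;
    }
  }

  void Perform(int i) {
    moves[i].pending = true;  // stands in for the cleared destination
    for (size_t j = 0; j < moves.size(); ++j) {  // resolve blockers first
      if (!moves[j].done && !moves[j].pending && moves[j].src == moves[i].dst)
        Perform(static_cast<int>(j));
    }
    moves[i].pending = false;
    // A swap may have made this move redundant: eliminate it.
    if (moves[i].src == moves[i].dst) { moves[i].done = true; return; }
    for (const MoveSketch& o : moves) {  // still blocked => cycle
      if (!o.done && o.pending && o.src == moves[i].dst) { EmitSwap(i); return; }
    }
    EmitMove(i);
  }
};

int main() {
  ToyResolver r;
  r.slots = {10, 20};
  r.moves = {{0, 1}, {1, 0}};  // the parallel move {s0 -> s1, s1 -> s0}
  for (size_t i = 0; i < r.moves.size(); ++i)
    if (!r.moves[i].done) r.Perform(static_cast<int>(i));
  std::printf("%d %d\n", r.slots[0], r.slots[1]);  // prints "20 10"
}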
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register src = cgen_->ToRegister(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ movq(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movq(dst, src);
}
} else if (source->IsStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ movq(dst, src);
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
__ movq(kScratchRegister, src);
__ movq(dst, kScratchRegister);
}
} else if (source->IsConstantOperand()) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsInteger32Constant(constant_source)) {
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
} else {
ASSERT(destination->IsStackSlot());
Operand dst = cgen_->ToOperand(destination);
if (cgen_->IsInteger32Constant(constant_source)) {
// Allow top 32 bits of an untagged Integer32 to be arbitrary.
__ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
}
} else if (source->IsDoubleRegister()) {
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ movsd(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ movsd(cgen_->ToOperand(destination), src);
}
} else if (source->IsDoubleStackSlot()) {
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
__ movsd(cgen_->ToDoubleRegister(destination), src);
} else {
ASSERT(destination->IsDoubleStackSlot());
__ movsd(xmm0, src);
__ movsd(cgen_->ToOperand(destination), xmm0);
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
void LGapResolver::EmitSwap(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Swap two general-purpose registers.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
__ xchg(dst, src);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
// Swap a general-purpose register and a stack slot.
Register reg =
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
__ movq(kScratchRegister, mem);
__ movq(mem, reg);
__ movq(reg, kScratchRegister);
} else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
// Swap two stack slots or two double stack slots.
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
__ movsd(xmm0, src);
__ movq(kScratchRegister, dst);
__ movsd(dst, xmm0);
__ movq(src, kScratchRegister);
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
// Swap two double registers.
XMMRegister source_reg = cgen_->ToDoubleRegister(source);
XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
__ movsd(xmm0, source_reg);
__ movsd(source_reg, destination_reg);
__ movsd(destination_reg, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// Swap a double register and a double stack slot.
ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
(source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
? source
: destination);
LOperand* other = source->IsDoubleRegister() ? destination : source;
ASSERT(other->IsDoubleStackSlot());
Operand other_operand = cgen_->ToOperand(other);
__ movsd(xmm0, other_operand);
__ movsd(other_operand, reg);
__ movsd(reg, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
}
// The swap of source and destination has executed a move from source to
// destination.
moves_[index].Eliminate();
// Any unperformed (including pending) move with a source of either
// this move's source or destination needs to have their source
// changed to reflect the state of affairs after the swap.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(source)) {
moves_[i].set_source(destination);
} else if (other_move.Blocks(destination)) {
moves_[i].set_source(source);
}
}
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64

74
deps/v8/src/x64/lithium-gap-resolver-x64.h

@@ -0,0 +1,74 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Execute a move by emitting a swap of two operands. The move from
// source to destination is removed from the move graph.
void EmitSwap(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
};
} } // namespace v8::internal
#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_

188
deps/v8/src/x64/lithium-x64.cc

@@ -974,12 +974,7 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2);
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1048,20 +1043,19 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
Abort("Unimplemented: %s", "DoPushArgument");
return NULL;
++argument_count_;
LOperand* argument = UseOrConstant(instr->argument());
return new LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
Abort("Unimplemented: %s", "DoGlobalObject");
return NULL;
return DefineAsRegister(new LGlobalObject);
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
Abort("Unimplemented: %s", "DoGlobalReceiver");
return NULL;
return DefineAsRegister(new LGlobalReceiver);
}
@@ -1103,8 +1097,10 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
Abort("Unimplemented: %s", "DoCallNew");
return NULL;
LOperand* constructor = UseFixed(instr->constructor(), rdi);
argument_count_ -= instr->argument_count();
LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1181,8 +1177,23 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
Abort("Unimplemented: %s", "DoSub");
return NULL;
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::SUB, instr);
}
}
@@ -1243,26 +1254,34 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
Abort("Unimplemented: %s", "DoCompareJSObjectEq");
return NULL;
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
Abort("Unimplemented: %s", "DoIsNull");
return NULL;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsNull(value));
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
Abort("Unimplemented: %s", "DoIsObject");
return NULL;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LIsObject(value));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
Abort("Unimplemented: %s", "DoIsSmi");
return NULL;
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
return DefineAsRegister(new LIsSmi(value));
}
@@ -1316,14 +1335,69 @@ LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Abort("Unimplemented: %s", "DoChange");
Representation from = instr->from();
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
if (needs_check) {
LOperand* xmm_temp =
(instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
? NULL
: FixedTemp(xmm1);
LTaggedToI* res = new LTaggedToI(value, xmm_temp);
return AssignEnvironment(DefineSameAsFirst(res));
} else {
return DefineSameAsFirst(new LSmiUntag(value, needs_check));
}
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new LNumberTagD(value, temp);
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
bool needs_temp = instr->CanTruncateToInt32() &&
!CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
}
}
UNREACHABLE();
return NULL; return NULL;
} }
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) { LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
Abort("Unimplemented: %s", "DoCheckNonSmi"); LOperand* value = UseRegisterAtStart(instr->value());
return NULL; return AssignEnvironment(new LCheckSmi(value, zero));
} }
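
DoChange is the representation-change hub (tagged, double, int32 in every direction), and the operand constraints differ per direction: the xmm1 scratch, for instance, is only needed when a heap number must be converted without SSE3 truncation. A standalone sketch of the tagged-to-int32 direction, with a hypothetical HeapNumber and 32-bit smi tagging assumed in place of V8's real object layout:

#include <cassert>
#include <cstdint>

struct HeapNumber { double value; };  // hypothetical boxed double

// LSmiUntag path: a known smi untags with one arithmetic shift.
static int32_t SmiUntag(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 1);
}

// LTaggedToI path: smi fast case, else load the double payload and truncate
// (the step that needs the xmm scratch or SSE3 in the real backend).
static int32_t TaggedToI(intptr_t tagged) {
  if ((tagged & 1) == 0) return SmiUntag(tagged);
  HeapNumber* number = reinterpret_cast<HeapNumber*>(tagged & ~intptr_t{1});
  return static_cast<int32_t>(number->value);
}

int main() {
  HeapNumber n{3.5};
  intptr_t boxed = reinterpret_cast<intptr_t>(&n) | 1;  // assumed pointer tag
  assert(TaggedToI(boxed) == 3);
  assert(TaggedToI(static_cast<intptr_t>(7) << 1) == 7);
  return 0;
}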
@@ -1334,26 +1408,28 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  Abort("Unimplemented: %s", "DoCheckPrototypeMaps");
-  return NULL;
+  LOperand* temp = TempRegister();
+  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
+  return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
-  Abort("Unimplemented: %s", "DoCheckSmi");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, not_zero));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  Abort("Unimplemented: %s", "DoCheckFunction");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
-  Abort("Unimplemented: %s", "DoCheckMap");
-  return NULL;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LCheckMap* result = new LCheckMap(value);
+  return AssignEnvironment(result);
 }
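
Note that DoCheckNonSmi (above) and DoCheckSmi reuse one lithium instruction, LCheckSmi, parameterized by the condition on which to deoptimize: after a "test value, 1", zero fires when the value is a smi and not_zero when it is not. A small sketch of that shared predicate, with a hypothetical Condition enum and 32-bit tagging assumed:

#include <cassert>
#include <cstdint>

enum Condition { zero, not_zero };  // hypothetical mirror of the assembler enum

// One check, two uses: the caller picks the condition meaning "bail out".
static bool ShouldDeoptimize(intptr_t tagged, Condition cond) {
  bool zf = (tagged & 1) == 0;  // zero flag after "test value, 1"
  return cond == zero ? zf : !zf;
}

int main() {
  intptr_t smi = 42 << 1;
  assert(ShouldDeoptimize(smi, zero));       // DoCheckNonSmi: smi => deopt
  assert(!ShouldDeoptimize(smi, not_zero));  // DoCheckSmi: smi => fine
  return 0;
}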
@@ -1381,15 +1457,15 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
 LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  Abort("Unimplemented: %s", "DoLoadGlobal");
-  return NULL;
+  LLoadGlobal* result = new LLoadGlobal;
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  Abort("Unimplemented: %s", "DoStoreGlobal");
-  return NULL;
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
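
check_hole_value() is what decides whether LLoadGlobal needs a deoptimization environment: a global cell that may still contain the hole sentinel must be able to bail out so the runtime can raise the proper error. A minimal sketch of the guarded fast path; the hole object and the bailout here are hypothetical stand-ins:

#include <cstdio>
#include <cstdlib>

struct Object {};
static Object the_hole;  // hypothetical "not yet initialized" sentinel

// The fast path LLoadGlobal emits when check_hole_value() is true.
static Object* LoadGlobalCell(Object** cell) {
  Object* value = *cell;
  if (value == &the_hole) {
    std::puts("deopt: global holds the hole");  // AssignEnvironment case
    std::exit(1);
  }
  return value;
}

int main() {
  Object v;
  Object* cell = &v;
  LoadGlobalCell(&cell);  // initialized cell: no bailout
  return 0;
}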
@@ -1399,8 +1475,9 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  Abort("Unimplemented: %s", "DoLoadNamedField");
-  return NULL;
+  ASSERT(instr->representation().IsTagged());
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new LLoadNamedField(obj));
 }
@@ -1450,8 +1527,22 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
-  Abort("Unimplemented: %s", "DoStoreNamedField");
-  return NULL;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We only need a scratch register if we have a write barrier or we
+  // have a store into the properties array (not in-object-property).
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj, val, temp);
 }
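
The operand choices above follow from the write barrier: when a store may create an old-to-new pointer, both the object and the value must still be inspectable after the store, hence UseTempRegister for both plus a scratch. A toy sketch of a remembered-set barrier under those assumptions; this is not V8's actual barrier:

#include <unordered_set>

// Toy remembered set: a minor GC scans only these slots instead of the
// whole old generation.
struct MiniHeap {
  std::unordered_set<void**> remembered;

  // The store plus barrier; it reads both slot and value after the store,
  // which is why the lithium operands must stay live past it.
  void StoreWithBarrier(void** slot, void* value, bool value_is_young) {
    *slot = value;
    if (value_is_young) remembered.insert(slot);
  }
};

int main() {
  MiniHeap heap;
  void* young_obj = &heap;  // placeholder object
  void* slot_storage = nullptr;
  heap.StoreWithBarrier(&slot_storage, young_obj, /*value_is_young=*/true);
  return heap.remembered.size() == 1 ? 0 : 1;
}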
@@ -1588,7 +1679,14 @@ LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
 LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
-  Abort("Unimplemented: %s", "DoEnterInlined");
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
   return NULL;
 }
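
DoEnterInlined emits no code at all (it returns NULL); its job is bookkeeping: copying the caller's simulated environment so deoptimization inside the inlinee can reconstruct both frames. A sketch with hypothetical types of what CopyForInlining conceptually builds:

#include <memory>
#include <vector>

// Hypothetical miniature of HEnvironment: one simulated frame per function,
// chained through |outer| exactly as the real frames would be at runtime.
struct Environment {
  const Environment* outer = nullptr;
  std::vector<int> values;  // stand-in for params, locals, expression stack
};

// Conceptual CopyForInlining: a fresh inner frame for the inlinee, chained
// to the caller so a deopt can rebuild caller and inlinee frames together.
static std::unique_ptr<Environment> CopyForInlining(const Environment* caller) {
  auto inner = std::make_unique<Environment>();
  inner->outer = caller;
  return inner;
}

int main() {
  Environment caller;
  auto inner = CopyForInlining(&caller);
  return inner->outer == &caller ? 0 : 1;
}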

Some files were not shown because too many files changed in this diff
