
Upgrade v8 to 1.3.11

v0.7.4-release
Ryan Dahl, 15 years ago
commit ab530bb211
  1. deps/v8/ChangeLog (13)
  2. deps/v8/SConstruct (34)
  3. deps/v8/include/v8.h (6)
  4. deps/v8/src/api.cc (4)
  5. deps/v8/src/arm/assembler-arm-inl.h (29)
  6. deps/v8/src/arm/assembler-arm.cc (33)
  7. deps/v8/src/arm/assembler-arm.h (24)
  8. deps/v8/src/arm/codegen-arm.cc (36)
  9. deps/v8/src/arm/codegen-arm.h (4)
  10. deps/v8/src/arm/debug-arm.cc (48)
  11. deps/v8/src/arm/macro-assembler-arm.cc (42)
  12. deps/v8/src/arm/macro-assembler-arm.h (29)
  13. deps/v8/src/arm/virtual-frame-arm.cc (12)
  14. deps/v8/src/assembler.cc (2)
  15. deps/v8/src/debug.cc (2)
  16. deps/v8/src/factory.cc (10)
  17. deps/v8/src/ia32/assembler-ia32.cc (46)
  18. deps/v8/src/ia32/assembler-ia32.h (5)
  19. deps/v8/src/ia32/codegen-ia32.cc (203)
  20. deps/v8/src/ia32/ic-ia32.cc (4)
  21. deps/v8/src/ia32/macro-assembler-ia32.cc (4)
  22. deps/v8/src/ia32/macro-assembler-ia32.h (2)
  23. deps/v8/src/ic-inl.h (2)
  24. deps/v8/src/ic.cc (2)
  25. deps/v8/src/ic.h (2)
  26. deps/v8/src/mark-compact.cc (13)
  27. deps/v8/src/objects.cc (4)
  28. deps/v8/src/objects.h (6)
  29. deps/v8/src/runtime.cc (2)
  30. deps/v8/src/spaces.h (2)
  31. deps/v8/src/third_party/dtoa/dtoa.c (18)
  32. deps/v8/src/top.cc (30)
  33. deps/v8/src/v8.h (5)
  34. deps/v8/src/version.cc (4)
  35. deps/v8/src/x64/assembler-x64.cc (50)
  36. deps/v8/src/x64/assembler-x64.h (17)
  37. deps/v8/src/x64/builtins-x64.cc (44)
  38. deps/v8/src/x64/cfg-x64.cc (2)
  39. deps/v8/src/x64/codegen-x64.cc (638)
  40. deps/v8/src/x64/ic-x64.cc (51)
  41. deps/v8/src/x64/macro-assembler-x64.cc (750)
  42. deps/v8/src/x64/macro-assembler-x64.h (251)
  43. deps/v8/src/x64/stub-cache-x64.cc (62)
  44. deps/v8/src/x64/virtual-frame-x64.cc (4)
  45. deps/v8/test/cctest/cctest.status (2)
  46. deps/v8/test/cctest/test-conversions.cc (8)
  47. deps/v8/test/cctest/test-debug.cc (7)
  48. deps/v8/test/cctest/test-strings.cc (28)
  49. deps/v8/test/mjsunit/array-splice.js (3)
  50. deps/v8/test/mjsunit/mjsunit.status (25)
  51. deps/v8/test/mjsunit/smi-negative-zero.js (60)
  52. deps/v8/tools/gyp/v8.gyp (4)
  53. deps/v8/tools/presubmit.py (11)
  54. deps/v8/tools/test.py (6)
  55. deps/v8/tools/v8.xcodeproj/project.pbxproj (2)
  56. deps/v8/tools/visual_studio/common.vsprops (2)

deps/v8/ChangeLog (13)

@@ -1,3 +1,16 @@
+2009-09-15: Version 1.3.11
+
+        Fixed crash in error reporting during bootstrapping.
+
+        Optimized generated IA32 math code by using SSE2 instructions when
+        available.
+
+        Implemented missing pieces of debugger infrastructure on ARM. The
+        debugger is now fully functional on ARM.
+
+        Make 'hidden' the default visibility for gcc.
+
+
 2009-09-09: Version 1.3.10

         Fixed profiler on Mac in 64-bit mode.
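The IA32 optimization above is gated on runtime CPU feature detection rather than a compile-time flag. A minimal standalone sketch of such a check, using GCC's <cpuid.h> rather than V8's own CpuFeatures probing:

    #include <cpuid.h>
    #include <cstdio>

    // SSE2 support is reported in CPUID leaf 1, EDX bit 26.
    static bool HasSSE2() {
      unsigned int eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
      return (edx & (1u << 26)) != 0;
    }

    int main() {
      std::printf("SSE2 available: %s\n", HasSSE2() ? "yes" : "no");
      return 0;
    }

In the generated-code paths further down (codegen-ia32.cc), the equivalent check appears as CpuFeatures::IsSupported(CpuFeatures::SSE2).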

deps/v8/SConstruct (34)

@@ -96,13 +96,18 @@ ANDROID_LINKFLAGS = ['-nostdlib',
 LIBRARY_FLAGS = {
   'all': {
-    'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
     'CPPPATH': [join(root_dir, 'src')],
     'regexp:native': {
       'CPPDEFINES': ['V8_NATIVE_REGEXP']
     },
     'mode:debug': {
       'CPPDEFINES': ['V8_ENABLE_CHECKS']
+    },
+    'profilingsupport:on': {
+      'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+    },
+    'debuggersupport:on': {
+      'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
     }
   },
   'gcc': {
@@ -110,11 +115,14 @@ LIBRARY_FLAGS = {
     'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
     'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
   },
+  'visibility:hidden': {
+    # Use visibility=default to disable this.
+    'CXXFLAGS': ['-fvisibility=hidden']
+  },
   'mode:debug': {
     'CCFLAGS': ['-g', '-O0'],
     'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
     'os:android': {
+      'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
       'CCFLAGS': ['-mthumb']
     }
   },
@@ -123,7 +131,7 @@ LIBRARY_FLAGS = {
                '-ffunction-sections'],
     'os:android': {
       'CCFLAGS': ['-mthumb', '-Os'],
-      'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
+      'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
     }
   },
   'os:linux': {
@@ -229,7 +237,6 @@ LIBRARY_FLAGS = {
 V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
-      'CXXFLAGS': [], #['-fvisibility=hidden'],
       'WARNINGFLAGS': ['-Wall',
                        '-Werror',
                        '-W',
@@ -576,6 +583,16 @@ SIMPLE_OPTIONS = {
     'default': 'static',
     'help': 'the type of library to produce'
   },
+  'profilingsupport': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'enable profiling of JavaScript code'
+  },
+  'debuggersupport': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'enable debugging of JavaScript code'
+  },
   'soname': {
     'values': ['on', 'off'],
     'default': 'off',
@@ -615,6 +632,11 @@ SIMPLE_OPTIONS = {
     'values': ['on', 'off'],
     'default': 'off',
     'help': 'more output from compiler and linker'
+  },
+  'visibility': {
+    'values': ['default', 'hidden'],
+    'default': 'hidden',
+    'help': 'shared library symbol visibility'
   }
 }
@@ -794,6 +816,10 @@ def PostprocessOptions(options):
     # Print a warning if arch has explicitly been set
     print "Warning: forcing architecture to match simulator (%s)" % options['simulator']
     options['arch'] = options['simulator']
+  if (options['prof'] != 'off') and (options['profilingsupport'] == 'off'):
+    # Print a warning if profiling is enabled without profiling support
+    print "Warning: forcing profilingsupport on when prof is on"
+    options['profilingsupport'] = 'on'


 def ParseEnvOverrides(arg, imports):

deps/v8/include/v8.h (6)

@@ -2725,9 +2725,9 @@ class Internals {

   // These constants are compiler dependent so their values must be
   // defined within the implementation.
-  static int kJSObjectType;
-  static int kFirstNonstringType;
-  static int kProxyType;
+  V8EXPORT static int kJSObjectType;
+  V8EXPORT static int kFirstNonstringType;
+  V8EXPORT static int kProxyType;

   static inline bool HasHeapObjectTag(internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==

deps/v8/src/api.cc (4)

@@ -2672,9 +2672,7 @@ Persistent<Context> v8::Context::New(
   }

   // Leave V8.

-  if (!ApiCheck(!env.is_null(),
-                "v8::Context::New()",
-                "Could not initialize environment"))
+  if (env.is_null())
     return Persistent<Context>();

   return Persistent<Context>(Utils::ToLocal(env));
 }

deps/v8/src/arm/assembler-arm-inl.h (29)

@@ -105,40 +105,45 @@ Address* RelocInfo::target_reference_address() {

 Address RelocInfo::call_address() {
   ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
-  return NULL;
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
 }


 void RelocInfo::set_call_address(Address target) {
   ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
 }


 Object* RelocInfo::call_object() {
-  ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
-  return NULL;
+  return *call_object_address();
 }


 Object** RelocInfo::call_object_address() {
   ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
-  return NULL;
+  // The 2 instructions offset assumes patched return sequence.
+  ASSERT(IsJSReturn(rmode()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }


 void RelocInfo::set_call_object(Object* target) {
-  ASSERT(IsCallInstruction());
-  UNIMPLEMENTED();
+  *call_object_address() = target;
 }


 bool RelocInfo::IsCallInstruction() {
-  UNIMPLEMENTED();
-  return false;
+  // On ARM a "call instruction" is actually two instructions.
+  //   mov lr, pc
+  //   ldr pc, [pc, #XXX]
+  return (Assembler::instr_at(pc_) == kMovLrPc)
+      && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
+          == kLdrPCPattern);
 }
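For reference, a standalone sketch of the two-instruction pattern match that IsCallInstruction now performs, with the standard ARM encodings written out as literals (V8 builds its constants from bitfield macros in assembler-arm.cc below; the mask here is simplified to the positive-offset ldr form):

    #include <cstdint>

    typedef uint32_t Instr;

    const Instr kMovLrPcEncoding = 0xE1A0E00F;  // mov lr, pc
    const Instr kLdrPcPcMask     = 0xFFFFF000;  // ignore the 12-bit offset
    const Instr kLdrPcPcSig      = 0xE59FF000;  // ldr pc, [pc, #offset]

    // A patched JS call site is two consecutive instructions; the word
    // after them holds the branch target from the constant pool.
    bool LooksLikeCallSequence(const Instr* pc) {
      return pc[0] == kMovLrPcEncoding &&
             (pc[1] & kLdrPcPcMask) == kLdrPcPcSig;
    }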

deps/v8/src/arm/assembler-arm.cc (33)

@@ -93,7 +93,14 @@ const int RelocInfo::kApplyMask = 0;

 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
-  UNIMPLEMENTED();
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
 }
@@ -232,6 +239,10 @@ static const Instr kPushRegPattern =
 // register r is not encoded.
 static const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | sp.code() * B16;
+// mov lr, pc
+const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
+// ldr pc, [pc, #XXX]
+const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
 // spare_buffer_
 static const int kMinimalBufferSize = 4*KB;
@@ -1301,6 +1312,13 @@ void Assembler::lea(Register dst,
 // Debugging

+void Assembler::RecordJSReturn() {
+  WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
 void Assembler::RecordComment(const char* msg) {
   if (FLAG_debug_code) {
     CheckBuffer();
@@ -1387,16 +1405,20 @@ void Assembler::GrowBuffer() {
     RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
-    rinfo.set_pc(rinfo.pc() + pc_delta);
+    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
+      rinfo.set_pc(rinfo.pc() + pc_delta);
+    }
   }
 }


 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
-  if (rmode >= RelocInfo::COMMENT && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // adjust code for new modes
-    ASSERT(RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode));
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+    // Adjust code for new modes.
+    ASSERT(RelocInfo::IsJSReturn(rmode)
+        || RelocInfo::IsComment(rmode)
+        || RelocInfo::IsPosition(rmode));
     // these modes do not need an entry in the constant pool
   } else {
     ASSERT(num_prinfo_ < kMaxNumPRInfo);
@@ -1490,6 +1512,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
         rinfo.rmode() != RelocInfo::POSITION &&
         rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
     Instr instr = instr_at(rinfo.pc());
+
     // Instruction to patch must be a ldr/str [pc, #offset]
     // P and U set, B and W clear, Rn == pc, offset12 still 0
     ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==

deps/v8/src/arm/assembler-arm.h (24)

@@ -376,6 +376,10 @@ class MemOperand BASE_EMBEDDED {

 typedef int32_t Instr;

+extern const Instr kMovLrPc;
+extern const Instr kLdrPCPattern;
+

 class Assembler : public Malloced {
  public:
   // Create an assembler. Instructions and relocation information are emitted
@@ -433,12 +437,16 @@ class Assembler : public Malloced {
   INLINE(static Address target_address_at(Address pc));
   INLINE(static void set_target_address_at(Address pc, Address target));

+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
   // Distance between the instruction referring to the address of the call
   // target (ldr pc, [target addr in const pool]) and the return address
-  static const int kPatchReturnSequenceLength = sizeof(Instr);
+  static const int kCallTargetAddressOffset = kInstrSize;

   // Distance between start of patched return sequence and the emitted address
   // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = 1;
+  static const int kPatchReturnSequenceAddressOffset = kInstrSize;

   // Difference between address of current opcode and value read from pc
   // register.
@@ -652,9 +660,16 @@ class Assembler : public Malloced {
   // Jump unconditionally to given label.
   void jmp(Label* L) { b(L, al); }

+  // Check the code size generated from label to here.
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
+  }
+
   // Debugging

+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --debug_code to enable.
   void RecordComment(const char* msg);
@@ -671,7 +686,7 @@ class Assembler : public Malloced {
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }

   // Read/patch instructions
-  Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
   }
@@ -708,7 +723,6 @@ class Assembler : public Malloced {
   int next_buffer_check_;  // pc offset of next buffer check

   // Code generation
-  static const int kInstrSize = sizeof(Instr);  // signed size
   // The relocation writer's position is at least kGap bytes below the end of
   // the generated instructions. This is so that multi-instruction sequences do
   // not have to check for overflow. The same is true for writes of large
@@ -795,6 +809,8 @@ class Assembler : public Malloced {
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

   friend class RegExpMacroAssemblerARM;
+  friend class RelocInfo;
+  friend class CodePatcher;
 };

 } }  // namespace v8::internal

deps/v8/src/arm/codegen-arm.cc (36)

@@ -299,7 +299,10 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
   }

   // Generate the return sequence if necessary.
-  if (frame_ != NULL || function_return_.is_linked()) {
+  if (has_valid_frame() || function_return_.is_linked()) {
+    if (!function_return_.is_linked()) {
+      CodeForReturnPosition(fun);
+    }
     // exit
     // r0: result
     // sp: stack pointer
@@ -315,12 +318,23 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
       frame_->CallRuntime(Runtime::kTraceExit, 1);
     }

+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+
     // Tear down the frame which will restore the caller's frame pointer and
     // the link register.
     frame_->Exit();

-    __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
-    __ Jump(lr);
+    // Here we use masm_-> instead of the __ macro to avoid the code coverage
+    // tool from instrumenting as we rely on the code size here.
+    masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+    masm_->Jump(lr);
+
+    // Check that the size of the code used for returning matches what is
+    // expected by the debugger.
+    ASSERT_EQ(kJSReturnSequenceLength,
+              masm_->InstructionsGeneratedSince(&check_exit_codesize));
   }

   // Code generation state must be reset.
@@ -1111,10 +1125,10 @@ void CodeGenerator::CheckStack() {
   if (FLAG_check_stack) {
     Comment cmnt(masm_, "[ check stack");
     __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
-    // added to the implicit 8 byte offset that always applies to operations
-    // with pc and gives a return address 12 bytes down.
-    masm_->add(lr, pc, Operand(sizeof(Instr)));
+    // Put the lr setup instruction in the delay slot. kInstrSize is added to
+    // the implicit 8 byte offset that always applies to operations with pc and
+    // gives a return address 12 bytes down.
+    masm_->add(lr, pc, Operand(Assembler::kInstrSize));
     masm_->cmp(sp, Operand(ip));
     StackCheckStub stub;
     // Call the stub if lower.
@@ -1380,16 +1394,12 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ReturnStatement");

+  CodeForStatementPosition(node);
+  LoadAndSpill(node->expression());
   if (function_return_is_shadowed_) {
-    CodeForStatementPosition(node);
-    LoadAndSpill(node->expression());
     frame_->EmitPop(r0);
     function_return_.Jump();
   } else {
-    // Load the returned value.
-    CodeForStatementPosition(node);
-    LoadAndSpill(node->expression());
-
     // Pop the result from the frame and prepare the frame for
     // returning thus making it easier to merge.
     frame_->EmitPop(r0);

deps/v8/src/arm/codegen-arm.h (4)

@@ -180,6 +180,10 @@ class CodeGenerator: public AstVisitor {

   static const int kUnknownIntValue = -1;

+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceLength = 4;
+
  private:
   // Construction/Destruction
   CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
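Since every ARM instruction is four bytes, this constant together with Assembler::InstructionsGeneratedSince() reduces the size check in GenCode above to simple arithmetic; a toy illustration with hypothetical standalone names:

    // Mirrors the ASSERT_EQ in codegen-arm.cc, outside the assembler.
    const int kInstrSize = 4;               // bytes per ARM instruction
    const int kJSReturnSequenceLength = 4;  // instructions in the sequence

    int InstructionsGeneratedSince(int label_pos, int pc_offset) {
      return (pc_offset - label_pos) / kInstrSize;
    }

    // A conforming return sequence spans 16 bytes, so
    // InstructionsGeneratedSince(p, p + 16) == kJSReturnSequenceLength.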

deps/v8/src/arm/debug-arm.cc (48)

@@ -34,28 +34,41 @@ namespace v8 {
 namespace internal {

 #ifdef ENABLE_DEBUGGER_SUPPORT
-// Currently debug break is not supported in frame exit code on ARM.
 bool BreakLocationIterator::IsDebugBreakAtReturn() {
-  return false;
+  return Debug::IsDebugBreakAtReturn(rinfo());
 }


-// Currently debug break is not supported in frame exit code on ARM.
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  UNIMPLEMENTED();
+  // Patch the code changing the return from JS function sequence from
+  //   mov sp, fp
+  //   ldmia sp!, {fp, lr}
+  //   add sp, sp, #4
+  //   bx lr
+  // to a call to the debug break return code.
+  //   mov lr, pc
+  //   ldr pc, [pc, #-4]
+  //   <debug break return code entry point address>
+  //   bktp 0
+  CodePatcher patcher(rinfo()->pc(), 4);
+  patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
+  patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
+  patcher.Emit(Debug::debug_break_return()->entry());
+  patcher.masm()->bkpt(0);
 }


-// Currently debug break is not supported in frame exit code on ARM.
+// Restore the JS frame exit code.
 void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  UNIMPLEMENTED();
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     CodeGenerator::kJSReturnSequenceLength);
 }


+// A debug break in the exit code is identified by a call.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  // Currently debug break is not supported in frame exit code on ARM.
-  return false;
+  return rinfo->IsCallInstruction();
 }
@@ -95,8 +108,6 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   __ LeaveInternalFrame();

-  // Inlined ExitJSFrame ends here.
-
   // Finally restore all registers.
   __ RestoreRegistersFromMemory(kJSCallerSaved);
@@ -138,12 +149,20 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {

 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  // Keyed load IC not implemented on ARM.
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Generate_DebugBreakCallHelper(masm, 0);
 }


 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // Keyed store IC not implemented on ARM.
+  // ---------- S t a t e --------------
+  //  -- lr     : return address
+  //  -- sp[0]  : key
+  //  -- sp[4]  : receiver
+  Generate_DebugBreakCallHelper(masm, 0);
 }
@@ -180,7 +199,10 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {

 void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
-  // Generate nothing as CodeStub CallFunction is not used on ARM.
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0);
 }

deps/v8/src/arm/macro-assembler-arm.cc (42)

@@ -133,7 +133,7 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
   // and the target address of the call would be referenced by the first
   // instruction rather than the second one, which would make it harder to patch
   // (two instructions before the return address, instead of one).
-  ASSERT(kPatchReturnSequenceLength == sizeof(Instr));
+  ASSERT(kCallTargetAddressOffset == kInstrSize);
 }
@@ -167,7 +167,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
   add(pc, pc, Operand(index,
                       LSL,
                       assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
-  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * sizeof(Instr));
+  BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
   nop();  // Jump table alignment.
   for (int i = 0; i < targets.length(); i++) {
     b(targets[i]);
@@ -1054,7 +1054,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
         Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
-    Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
   }
 }
@@ -1072,7 +1072,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
         Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
-    Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+    Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
   }
@@ -1153,4 +1153,38 @@ void MacroAssembler::Abort(const char* msg) {
 }

+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+    : address_(address),
+      instructions_(instructions),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(address, size_ + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap on order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+  masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+  masm()->emit(reinterpret_cast<Instr>(addr));
+}
+#endif  // ENABLE_DEBUGGER_SUPPORT
+

 } }  // namespace v8::internal

deps/v8/src/arm/macro-assembler-arm.h (29)

@@ -339,6 +339,35 @@ class MacroAssembler: public Assembler {
 };


+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int instructions);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+  // Emit an instruction directly.
+  void Emit(Instr x);
+
+  // Emit an address directly.
+  void Emit(Address addr);
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int instructions_;  // Number of instructions of the expected patch size.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
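A self-contained toy model of the contract this class enforces (not the V8 class itself): the patch window has a fixed size, exactly that many instruction words must be emitted, and the instruction cache would be flushed once patching completes:

    #include <cassert>
    #include <cstdint>

    class ToyCodePatcher {
     public:
      ToyCodePatcher(uint32_t* address, int instructions)
          : address_(address), size_(instructions), written_(0) {}
      ~ToyCodePatcher() {
        assert(written_ == size_);  // exact patch size, as the real dtor asserts
        // The real destructor also calls CPU::FlushICache over the window.
      }
      void Emit(uint32_t word) {
        assert(written_ < size_);   // never emit past the window
        address_[written_++] = word;
      }
     private:
      uint32_t* address_;  // start of the code being patched
      int size_;           // expected patch size in instructions
      int written_;        // instructions emitted so far
    };

SetDebugBreakAtReturn in debug-arm.cc above is the canonical user: it opens a four-instruction window over the JS return sequence and emits the debug-break call in its place.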

deps/v8/src/arm/virtual-frame-arm.cc (12)

@@ -127,6 +127,10 @@ void VirtualFrame::Enter() {

 void VirtualFrame::Exit() {
   Comment cmnt(masm(), "[ Exit JS frame");

+  // Record the location of the JS exit code for patching when setting
+  // break point.
+  __ RecordJSReturn();
+
   // Drop the execution stack down to the frame pointer and restore the caller
   // frame pointer and return address.
   __ mov(sp, fp);
@@ -149,10 +153,10 @@ void VirtualFrame::AllocateStackSlots() {
     __ push(ip);
   }
   if (FLAG_check_stack) {
-    // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
-    // added to the implicit 8 byte offset that always applies to operations
-    // with pc and gives a return address 12 bytes down.
-    masm()->add(lr, pc, Operand(sizeof(Instr)));
+    // Put the lr setup instruction in the delay slot. The kInstrSize is added
+    // to the implicit 8 byte offset that always applies to operations with pc
+    // and gives a return address 12 bytes down.
+    masm()->add(lr, pc, Operand(Assembler::kInstrSize));
     masm()->cmp(sp, Operand(r2));
     StackCheckStub stub;
     // Call the stub if lower.

deps/v8/src/assembler.cc (2)

@@ -494,7 +494,7 @@ void RelocInfo::Verify() {
       Address addr = target_address();
       ASSERT(addr != NULL);
       // Check that we can find the right code object.
-      HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+      Code* code = Code::GetCodeFromTargetAddress(addr);
       Object* found = Heap::FindCodeObject(addr);
       ASSERT(found->IsCode());
       ASSERT(code->address() == HeapObject::cast(found)->address());

deps/v8/src/debug.cc (2)

@@ -1604,7 +1604,7 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
   // Find the call address in the running code. This address holds the call to
   // either a DebugBreakXXX or to the debug break return entry code if the
   // break point is still active after processing the break point.
-  Address addr = frame->pc() - Assembler::kPatchReturnSequenceLength;
+  Address addr = frame->pc() - Assembler::kCallTargetAddressOffset;

   // Check if the location is at JS exit.
   bool at_js_return = false;

deps/v8/src/factory.cc (10)

@@ -401,10 +401,12 @@ Handle<Object> Factory::NewError(const char* maker,
                                  const char* type,
                                  Handle<JSArray> args) {
   Handle<String> make_str = Factory::LookupAsciiSymbol(maker);
-  Handle<JSFunction> fun =
-      Handle<JSFunction>(
-          JSFunction::cast(
-              Top::builtins()->GetProperty(*make_str)));
+  Handle<Object> fun_obj(Top::builtins()->GetProperty(*make_str));
+  // If the builtins haven't been properly configured yet this error
+  // constructor may not have been defined.  Bail out.
+  if (!fun_obj->IsJSFunction())
+    return Factory::undefined_value();
+  Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = Factory::LookupAsciiSymbol(type);
   Object** argv[2] = { type_obj.location(),
                        Handle<Object>::cast(args).location() };

deps/v8/src/ia32/assembler-ia32.cc (46)

@@ -157,6 +157,9 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   for (int i = 0; i < instruction_count; i++) {
     *(pc_ + i) = *(instructions + i);
   }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
 }
@@ -164,12 +167,25 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
 // Additional guard int3 instructions can be added if required.
 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
   // Call instruction takes up 5 bytes and int3 takes up one byte.
-  int code_size = 5 + guard_bytes;
+  static const int kCallCodeSize = 5;
+  int code_size = kCallCodeSize + guard_bytes;

-  // Patch the code.
+  // Create a code patcher.
   CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
+
+  // Patch the code.
   patcher.masm()->call(target, RelocInfo::NONE);

+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+
   // Add the requested number of int3 instructions after the call.
   for (int i = 0; i < guard_bytes; i++) {
     patcher.masm()->int3();
@@ -721,10 +737,10 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  UNIMPLEMENTED();
-  USE(cc);
-  USE(dst);
-  USE(src);
+  // Opcode: 0f 40 + cc /r
+  EMIT(0x0F);
+  EMIT(0x40 + cc);
+  emit_operand(dst, src);
 }
@@ -866,6 +882,13 @@ void Assembler::cmp(const Operand& op, const Immediate& imm) {
 }

+void Assembler::cmp(const Operand& op, Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_arith(7, op, Immediate(handle));
+}
+
+
 void Assembler::cmpb_al(const Operand& op) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1947,6 +1970,17 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
 }

+void Assembler::comisd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x2F);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::movdbl(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;

deps/v8/src/ia32/assembler-ia32.h (5)

@@ -437,7 +437,7 @@ class Assembler : public Malloced {
   // Distance between the address of the code target in the call instruction
   // and the return address
-  static const int kPatchReturnSequenceLength = kPointerSize;
+  static const int kCallTargetAddressOffset = kPointerSize;
   // Distance between start of patched return sequence and the emitted address
   // to jump to.
   static const int kPatchReturnSequenceAddressOffset = 1;  // JMP imm32.
@@ -539,6 +539,7 @@ class Assembler : public Malloced {
   void cmp(Register reg, Handle<Object> handle);
   void cmp(Register reg, const Operand& op);
   void cmp(const Operand& op, const Immediate& imm);
+  void cmp(const Operand& op, Handle<Object> handle);

   void dec_b(Register dst);
@@ -719,6 +720,8 @@ class Assembler : public Malloced {
   void mulsd(XMMRegister dst, XMMRegister src);
   void divsd(XMMRegister dst, XMMRegister src);

+  void comisd(XMMRegister dst, XMMRegister src);
+
   // Use either movsd or movlpd.
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);

deps/v8/src/ia32/codegen-ia32.cc (203)

@@ -768,6 +768,11 @@ class FloatingPointHelper : public AllStatic {
   static void CheckFloatOperands(MacroAssembler* masm,
                                  Label* non_float,
                                  Register scratch);
+  // Test if operands are numbers (smi or HeapNumber objects), and load
+  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
+  // either operand is not a number.  Operands are in edx and eax.
+  // Leaves operands unchanged.
+  static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
   // Allocate a heap number in new space with undefined value.
   // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
   static void AllocateHeapNumber(MacroAssembler* masm,
@@ -6699,41 +6704,79 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
     case Token::DIV: {
       // eax: y
       // edx: x
-      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
-      // Fast-case: Both operands are numbers.
-      // Allocate a heap number, if needed.
-      Label skip_allocation;
-      switch (mode_) {
-        case OVERWRITE_LEFT:
-          __ mov(eax, Operand(edx));
-          // Fall through!
-        case OVERWRITE_RIGHT:
-          // If the argument in eax is already an object, we skip the
-          // allocation of a heap number.
-          __ test(eax, Immediate(kSmiTagMask));
-          __ j(not_zero, &skip_allocation, not_taken);
-          // Fall through!
-        case NO_OVERWRITE:
-          FloatingPointHelper::AllocateHeapNumber(masm,
-                                                  &call_runtime,
-                                                  ecx,
-                                                  edx,
-                                                  eax);
-          __ bind(&skip_allocation);
-          break;
-        default: UNREACHABLE();
-      }
-      FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
-      switch (op_) {
-        case Token::ADD: __ faddp(1); break;
-        case Token::SUB: __ fsubp(1); break;
-        case Token::MUL: __ fmulp(1); break;
-        case Token::DIV: __ fdivp(1); break;
-        default: UNREACHABLE();
-      }
-      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-      __ ret(2 * kPointerSize);
+      if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+        CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+        FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(2 * kPointerSize);
+
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        // Allocate a heap number, if needed.
+        Label skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+            __ mov(eax, Operand(edx));
+            // Fall through!
+          case OVERWRITE_RIGHT:
+            // If the argument in eax is already an object, we skip the
+            // allocation of a heap number.
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            FloatingPointHelper::AllocateHeapNumber(masm,
+                                                    &call_runtime,
+                                                    ecx,
+                                                    edx,
+                                                    eax);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(2 * kPointerSize);
+      }
     }
     case Token::MOD: {
       // For MOD we go directly to runtime in the non-smi case.
@@ -6981,6 +7024,38 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
 }

+void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+                                           Label* not_numbers) {
+  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
+  // Load operand in edx into xmm0, or branch to not_numbers.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+  __ bind(&load_eax);
+  // Load operand in eax into xmm1, or branch to not_numbers.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
+  __ j(equal, &load_float_eax);
+  __ jmp(not_numbers);  // Argument in eax is not a number.
+  __ bind(&load_smi_edx);
+  __ sar(edx, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ shl(edx, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&load_eax);
+  __ bind(&load_smi_eax);
+  __ sar(eax, 1);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm1, Operand(eax));
+  __ shl(eax, 1);  // Retag smi for heap number overwriting test.
+  __ jmp(&done);
+  __ bind(&load_float_eax);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ bind(&done);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch) {
   Label load_smi_1, load_smi_2, done_load_1, done;
@@ -7343,28 +7418,56 @@ void CompareStub::Generate(MacroAssembler* masm) {
     // Inlined floating point compare.
     // Call builtin if operands are not floating point or smi.
     Label check_for_symbols;
-    FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
-    FloatingPointHelper::LoadFloatOperands(masm, ecx);
-    __ FCmp();
-
-    // Jump to builtin for NaN.
-    __ j(parity_even, &call_builtin, not_taken);
-
-    // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
-    Label below_lbl, above_lbl;
-    // use edx, eax to convert unsigned to signed comparison
-    __ j(below, &below_lbl, not_taken);
-    __ j(above, &above_lbl, not_taken);
-
-    __ xor_(eax, Operand(eax));  // equal
-    __ ret(2 * kPointerSize);
-
-    __ bind(&below_lbl);
-    __ mov(eax, -1);
-    __ ret(2 * kPointerSize);
-
-    __ bind(&above_lbl);
-    __ mov(eax, 1);
-    __ ret(2 * kPointerSize);  // eax, edx were pushed
+    Label unordered;
+    if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
+      CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
+      CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+
+      FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+      __ comisd(xmm0, xmm1);
+
+      // Jump to builtin for NaN.
+      __ j(parity_even, &unordered, not_taken);
+      __ mov(eax, 0);  // equal
+      __ mov(ecx, Immediate(Smi::FromInt(1)));
+      __ cmov(above, eax, Operand(ecx));
+      __ mov(ecx, Immediate(Smi::FromInt(-1)));
+      __ cmov(below, eax, Operand(ecx));
+      __ ret(2 * kPointerSize);
+    } else {
+      FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
+      FloatingPointHelper::LoadFloatOperands(masm, ecx);
+      __ FCmp();
+
+      // Jump to builtin for NaN.
+      __ j(parity_even, &unordered, not_taken);
+
+      Label below_lbl, above_lbl;
+      // Return a result of -1, 0, or 1, to indicate result of comparison.
+      __ j(below, &below_lbl, not_taken);
+      __ j(above, &above_lbl, not_taken);
+
+      __ xor_(eax, Operand(eax));  // equal
+      // Both arguments were pushed in case a runtime call was needed.
+      __ ret(2 * kPointerSize);
+
+      __ bind(&below_lbl);
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+      __ ret(2 * kPointerSize);
+
+      __ bind(&above_lbl);
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+      __ ret(2 * kPointerSize);  // eax, edx were pushed
+    }
+
+    // If one of the numbers was NaN, then the result is always false.
+    // The cc is never not-equal.
+    __ bind(&unordered);
+    ASSERT(cc_ != not_equal);
+    if (cc_ == less || cc_ == less_equal) {
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+    } else {
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+    }
+    __ ret(2 * kPointerSize);  // eax, edx were pushed

     // Fast negative check for symbol-to-symbol equality.
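Both branches implement the same comparison contract; in plain C++ (not the generated assembly) it is:

    #include <cmath>

    // NaN operands take the caller-selected unordered result; the stub
    // picks Smi 1 for less/less_equal and Smi -1 otherwise, so that the
    // comparison always evaluates to false. Ordered operands give -1/0/1.
    int CompareDoubles(double x, double y, int unordered_result) {
      if (std::isnan(x) || std::isnan(y)) return unordered_result;
      if (x < y) return -1;
      if (x > y) return 1;
      return 0;
    }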

deps/v8/src/ia32/ic-ia32.cc (4)

@@ -840,7 +840,7 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;
@@ -867,7 +867,7 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
 static bool PatchInlinedMapCheck(Address address, Object* map) {
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address != kTestEaxByte) return false;

deps/v8/src/ia32/macro-assembler-ia32.cc (4)

@@ -319,7 +319,7 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {

 void MacroAssembler::FCmp() {
-  fcompp();
+  fucompp();
   push(eax);
   fnstsw_ax();
   sahf();
@@ -1170,7 +1170,6 @@ void MacroAssembler::Abort(const char* msg) {
 }

-#ifdef ENABLE_DEBUGGER_SUPPORT
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
@@ -1188,7 +1187,6 @@ CodePatcher::~CodePatcher() {
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT

 } }  // namespace v8::internal

deps/v8/src/ia32/macro-assembler-ia32.h (2)

@@ -338,7 +338,6 @@ class MacroAssembler: public Assembler {
 };

-#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. Is not legal to emit
@@ -357,7 +356,6 @@ class CodePatcher {
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
-#endif  // ENABLE_DEBUGGER_SUPPORT

 // -----------------------------------------------------------------------------

deps/v8/src/ic-inl.h (2)

@@ -38,7 +38,7 @@ namespace internal {

 Address IC::address() {
   // Get the address of the call.
-  Address result = pc() - Assembler::kPatchReturnSequenceLength;
+  Address result = pc() - Assembler::kCallTargetAddressOffset;

 #ifdef ENABLE_DEBUGGER_SUPPORT
   // First check if any break points are active if not just return the address

deps/v8/src/ic.cc (2)

@@ -122,7 +122,7 @@ Address IC::OriginalCodeAddress() {
   // Get the address of the call site in the active code. This is the
   // place where the call to DebugBreakXXX is and where the IC
   // normally would be.
-  Address addr = pc() - Assembler::kPatchReturnSequenceLength;
+  Address addr = pc() - Assembler::kCallTargetAddressOffset;
   // Return the address in the original code. This is the place where
   // the call which has been overwritten by the DebugBreakXXX resides
   // and the place where the inline cache system should look.

deps/v8/src/ic.h (2)

@@ -390,7 +390,7 @@ class KeyedStoreIC: public IC {
   // Support for patching the map that is checked in an inlined
   // version of keyed store.
   // The address is the patch point for the IC call
-  // (Assembler::kPatchReturnSequenceLength before the end of
+  // (Assembler::kCallTargetAddressOffset before the end of
   // the call/return address).
   // The map is the new map that the inlined code should check against.
   static bool PatchInlinedStore(Address address, Object* map);

deps/v8/src/mark-compact.cc (13)

@@ -279,7 +279,7 @@ class MarkingVisitor : public ObjectVisitor {

   void VisitCodeTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* code = CodeFromDerivedPointer(rinfo->target_address());
+    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
     if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
       IC::Clear(rinfo->pc());
       // Please note targets for cleared inline cached do not have to be
@@ -289,7 +289,7 @@ class MarkingVisitor : public ObjectVisitor {
     }
     if (IsCompacting()) {
       // When compacting we convert the target to a real object pointer.
-      code = CodeFromDerivedPointer(rinfo->target_address());
+      code = Code::GetCodeFromTargetAddress(rinfo->target_address());
       rinfo->set_target_object(code);
     }
   }
@@ -297,7 +297,7 @@ class MarkingVisitor : public ObjectVisitor {
   void VisitDebugTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsCallInstruction());
-    HeapObject* code = CodeFromDerivedPointer(rinfo->call_address());
+    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
     MarkCompactCollector::MarkObject(code);
     // When compacting we convert the call to a real object pointer.
     if (IsCompacting()) rinfo->set_call_object(code);
@@ -314,13 +314,6 @@ class MarkingVisitor : public ObjectVisitor {
   // Tells whether the mark sweep collection will perform compaction.
   bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }

-  // Retrieves the Code pointer from derived code entry.
-  Code* CodeFromDerivedPointer(Address addr) {
-    ASSERT(addr != NULL);
-    return reinterpret_cast<Code*>(
-        HeapObject::FromAddress(addr - Code::kHeaderSize));
-  }
-
   // Visit an unmarked object.
   void VisitUnmarkedObject(HeapObject* obj) {
 #ifdef DEBUG
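The removed helper spelled out the invariant that Code::GetCodeFromTargetAddress now owns: a call target points at the first instruction, which sits a fixed header size past the start of the Code object. A toy model with an illustrative constant (not V8's value):

    #include <cstdint>

    const uintptr_t kCodeHeaderSize = 64;  // assumption for illustration

    // Map an instruction-start (call target) address back to the start of
    // the enclosing code object.
    uintptr_t CodeObjectStartFromEntry(uintptr_t entry_address) {
      return entry_address - kCodeHeaderSize;
    }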

deps/v8/src/objects.cc (4)

@@ -4967,7 +4967,7 @@ void Code::ConvertICTargetsFromAddressToObject() {
        !it.done(); it.next()) {
     Address ic_addr = it.rinfo()->target_address();
     ASSERT(ic_addr != NULL);
-    HeapObject* code = HeapObject::FromAddress(ic_addr - Code::kHeaderSize);
+    HeapObject* code = Code::GetCodeFromTargetAddress(ic_addr);
     ASSERT(code->IsHeapObject());
     it.rinfo()->set_target_object(code);
   }
@@ -4980,7 +4980,7 @@ void Code::ConvertICTargetsFromAddressToObject() {
     if (it.rinfo()->IsCallInstruction()) {
       Address addr = it.rinfo()->call_address();
       ASSERT(addr != NULL);
-      HeapObject* code = HeapObject::FromAddress(addr - Code::kHeaderSize);
+      HeapObject* code = Code::GetCodeFromTargetAddress(addr);
       ASSERT(code->IsHeapObject());
       it.rinfo()->set_call_object(code);
     }

6
deps/v8/src/objects.h

@@ -889,11 +889,11 @@ class Object BASE_EMBEDDED {
 // Smi represents integer Numbers that can be stored in 31 bits.
-// TODO(X64) Increase to 53 bits?
 // Smis are immediate which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// TODO(X64): 31 bits signed int sign-extended to 63 bits.
 // Smi stands for small integer.
+// The this pointer has the following format: [31 bit signed int] 0
+// On 64-bit, the top 32 bits of the pointer is allowed to have any
+// value.
 class Smi: public Object {
  public:
   // Returns the integer value.
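For orientation, the tagging scheme the comment describes is plain integer arithmetic: a smi is the 31-bit value shifted left by one, so the low (tag) bit is zero, and heap pointers, whose low bit is set, can never be mistaken for one. A minimal C++ sketch of the idea (illustrative names, not the V8 API):

    #include <cstdint>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = 1;  // kSmiTag == 0

    intptr_t TagSmi(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;  // [31 bit signed int] 0
    }
    int32_t UntagSmi(intptr_t tagged) {
      return static_cast<int32_t>(tagged) >> kSmiTagSize;  // arithmetic shift
    }
    bool IsSmi(intptr_t word) {
      return (word & kSmiTagMask) == 0;  // heap pointers have the bit set
    }

On 64-bit, per the new comment, only the low 32 bits carry the value, which is why the x64 macro-assembler later in this commit does most smi arithmetic with 32-bit instructions.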

2
deps/v8/src/runtime.cc

@@ -3696,7 +3696,7 @@ static Object* Runtime_NumberMod(Arguments args) {
   CONVERT_DOUBLE_CHECKED(x, args[0]);
   CONVERT_DOUBLE_CHECKED(y, args[1]);

-#ifdef WIN32
+#if defined WIN32 || defined _WIN64
   // Workaround MS fmod bugs. ECMA-262 says:
   // dividend is finite and divisor is an infinity => result equals dividend
   // dividend is a zero and divisor is nonzero finite => result equals dividend
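For context, the workaround this hunk extends to 64-bit Windows (the body is cut off here) special-cases the inputs where the Microsoft CRT's fmod historically disagreed with ECMA-262. A hedged sketch of that logic in modern C++ (std::isfinite/std::isinf post-date this code; V8 used its own predicates):

    #include <cmath>

    double EcmaCompliantFmod(double x, double y) {
      if (std::isfinite(x) && std::isinf(y)) return x;         // finite % inf == dividend
      if (x == 0.0 && y != 0.0 && std::isfinite(y)) return x;  // signed zero preserved
      return std::fmod(x, y);
    }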

2
deps/v8/src/spaces.h

@@ -1686,7 +1686,7 @@ class CellSpace : public FixedSpace {
 #endif

  public:
-  TRACK_MEMORY("MapSpace")
+  TRACK_MEMORY("CellSpace")
 };

18
deps/v8/src/third_party/dtoa/dtoa.c

@@ -501,7 +501,9 @@ Balloc
 #endif

   ACQUIRE_DTOA_LOCK(0);
-  if ((rv = freelist[k])) {
+  /* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0). */
+  /* but this case seems very unlikely. */
+  if (k <= Kmax && (rv = freelist[k])) {
     freelist[k] = rv->next;
   }
   else {

@@ -511,7 +513,7 @@ Balloc
 #else
     len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
       /sizeof(double);
-    if (pmem_next - private_mem + len <= PRIVATE_mem) {
+    if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) {
       rv = (Bigint*)pmem_next;
       pmem_next += len;
     }

@@ -535,10 +537,14 @@ Bfree
 #endif
 {
   if (v) {
-    ACQUIRE_DTOA_LOCK(0);
-    v->next = freelist[v->k];
-    freelist[v->k] = v;
-    FREE_DTOA_LOCK(0);
+    if (v->k > Kmax)
+      free((void*)v);
+    else {
+      ACQUIRE_DTOA_LOCK(0);
+      v->next = freelist[v->k];
+      freelist[v->k] = v;
+      FREE_DTOA_LOCK(0);
+    }
   }
 }
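The shape of the fix: only block sizes up to Kmax are recycled through the freelist; anything larger is malloc'd and free'd directly, so Bfree no longer touches the lock for oversized blocks. A simplified C++ sketch of the policy (Kmax and the Block header are stand-ins for dtoa's Bigint internals):

    #include <cstdlib>

    const int Kmax = 7;  // stand-in for dtoa's freelist table size
    struct Block { int k; Block* next; };
    static Block* freelist[Kmax + 1];

    Block* Alloc(int k) {
      if (k <= Kmax && freelist[k] != NULL) {  // recycle a cached block
        Block* b = freelist[k];
        freelist[k] = b->next;
        return b;
      }
      Block* b = static_cast<Block*>(malloc(sizeof(Block) + (1u << k)));
      b->k = k;
      return b;
    }

    void Free(Block* b) {
      if (b->k > Kmax) {
        free(b);                   // oversized: never cached
      } else {
        b->next = freelist[b->k];  // small: back onto the freelist
        freelist[b->k] = b;
      }
    }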

30
deps/v8/src/top.cc

@@ -690,12 +690,17 @@ void Top::ComputeLocation(MessageLocation* target) {
 void Top::ReportUncaughtException(Handle<Object> exception,
                                   MessageLocation* location,
                                   Handle<String> stack_trace) {
-  Handle<Object> message =
-      MessageHandler::MakeMessageObject("uncaught_exception",
-                                        location,
-                                        HandleVector<Object>(&exception, 1),
-                                        stack_trace);
+  Handle<Object> message;
+  if (!Bootstrapper::IsActive()) {
+    // It's not safe to try to make message objects while the bootstrapper
+    // is active since the infrastructure may not have been properly
+    // initialized.
+    message =
+        MessageHandler::MakeMessageObject("uncaught_exception",
+                                          location,
+                                          HandleVector<Object>(&exception, 1),
+                                          stack_trace);
+  }
   // Report the uncaught exception.
   MessageHandler::ReportMessage(location, message);
 }

@@ -769,10 +774,15 @@ void Top::DoThrow(Object* exception,
       ComputeLocation(&potential_computed_location);
       location = &potential_computed_location;
     }
-    Handle<String> stack_trace;
-    if (FLAG_trace_exception) stack_trace = StackTrace();
-    message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
-        location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+    if (!Bootstrapper::IsActive()) {
+      // It's not safe to try to make message objects or collect stack
+      // traces while the bootstrapper is active since the infrastructure
+      // may not have been properly initialized.
+      Handle<String> stack_trace;
+      if (FLAG_trace_exception) stack_trace = StackTrace();
+      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+          location, HandleVector<Object>(&exception_handle, 1), stack_trace);
+    }
   }

   // Save the message for reporting if the the exception remains uncaught.

5
deps/v8/src/v8.h

@@ -51,11 +51,6 @@
 #error both DEBUG and NDEBUG are set
 #endif

-// Enable debugger support by default, unless it is in ANDROID
-#if !defined(ENABLE_DEBUGGER_SUPPORT) && !defined(ANDROID)
-#define ENABLE_DEBUGGER_SUPPORT
-#endif
-
 // Basic includes
 #include "../include/v8.h"
 #include "globals.h"

4
deps/v8/src/version.cc

@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      10
-#define PATCH_LEVEL       0
+#define BUILD_NUMBER      11
+#define PATCH_LEVEL       1
 #define CANDIDATE_VERSION false

 // Define SONAME to have the SCons build the put a specific SONAME into the

50
deps/v8/src/x64/assembler-x64.cc

@@ -173,22 +173,32 @@ void CpuFeatures::Probe() {
 // Patch the code at the current PC with a call to the target address.
 // Additional guard int3 instructions can be added if required.
 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
-  // Call instruction takes up 13 bytes and int3 takes up one byte.
-  static const int kCallInstructionSize = 13;
-  Address patch_site = pc_;
-  Memory::uint16_at(patch_site) = 0xBA49u;  // movq r10, imm64
-  // Write "0x00, call r10" starting at last byte of address.  We overwrite
-  // the 0x00 later, and this lets us write a uint32.
-  Memory::uint32_at(patch_site + 9) = 0xD2FF4900u;  // 0x00, call r10
-  Memory::Address_at(patch_site + 2) = target;
+  // Load register with immediate 64 and call through a register instructions
+  // takes up 13 bytes and int3 takes up one byte.
+  static const int kCallCodeSize = 13;
+  int code_size = kCallCodeSize + guard_bytes;
+
+  // Create a code patcher.
+  CodePatcher patcher(pc_, code_size);
+
+  // Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+  Label check_codesize;
+  patcher.masm()->bind(&check_codesize);
+#endif
+
+  // Patch the code.
+  patcher.masm()->movq(r10, target, RelocInfo::NONE);
+  patcher.masm()->call(r10);
+
+  // Check that the size of the code generated is as expected.
+  ASSERT_EQ(kCallCodeSize,
+            patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));

   // Add the requested number of int3 instructions after the call.
   for (int i = 0; i < guard_bytes; i++) {
-    *(patch_site + kCallInstructionSize + i) = 0xCC;  // int3
+    patcher.masm()->int3();
   }
-
-  // Indicate that code has changed.
-  CPU::FlushICache(patch_site, kCallInstructionSize + guard_bytes);
 }
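The 13-byte constant is forced by the encodings involved: movq r10, imm64 is a REX prefix, opcode 0xB8+2, and an 8-byte immediate (10 bytes), and the register-indirect call is a 3-byte sequence. The removed byte-poking above spells out the same layout by hand; reading its Memory::*_at stores gives:

    // Byte layout of the 13-byte patch site written by the old code:
    //   [0]      0x49   REX.WB prefix
    //   [1]      0xBA   movq r10, imm64  (0xB8 + register 2)
    //   [2..9]   target address, little-endian
    //   [10]     0x49   REX prefix of the indirect call
    //   [11-12]  0xFF 0xD2   call r10
    static const int kCallCodeSize = 10 + 3;  // movq + call == 13 bytes

The rewrite produces the identical bytes through the assembler itself and keeps the DEBUG-only size assertion as the safety net.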
@@ -197,6 +207,9 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   for (int i = 0; i < instruction_count; i++) {
     *(pc_ + i) = *(instructions + i);
   }
+
+  // Indicate that code has changed.
+  CPU::FlushICache(pc_, instruction_count);
 }

 // -----------------------------------------------------------------------------

@@ -366,7 +379,7 @@ void Assembler::bind(Label* L) {
 void Assembler::GrowBuffer() {
-  ASSERT(overflow());  // should not call this otherwise
+  ASSERT(buffer_overflow());  // should not call this otherwise
   if (!own_buffer_) FATAL("external code buffer is too small");

   // compute new buffer size

@@ -428,7 +441,7 @@ void Assembler::GrowBuffer() {
     }
   }

-  ASSERT(!overflow());
+  ASSERT(!buffer_overflow());
 }

@@ -1410,6 +1423,15 @@ void Assembler::neg(Register dst) {
 }

+void Assembler::negl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xF7);
+  emit_modrm(0x3, dst);
+}
+
 void Assembler::neg(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;

17
deps/v8/src/x64/assembler-x64.h

@@ -447,7 +447,7 @@ class Assembler : public Malloced {
   // Distance between the address of the code target in the call instruction
   // and the return address.  Checked in the debug build.
-  static const int kPatchReturnSequenceLength = 3 + kPointerSize;
+  static const int kCallTargetAddressOffset = 3 + kPointerSize;
   // Distance between start of patched return sequence and the emitted address
   // to jump to (movq = REX.W 0xB8+r.).
   static const int kPatchReturnSequenceAddressOffset = 2;

@@ -721,6 +721,7 @@ class Assembler : public Malloced {
   void neg(Register dst);
   void neg(const Operand& dst);
+  void negl(Register dst);

   void not_(Register dst);
   void not_(const Operand& dst);

@@ -729,6 +730,10 @@ class Assembler : public Malloced {
     arithmetic_op(0x0B, dst, src);
   }

+  void orl(Register dst, Register src) {
+    arithmetic_op_32(0x0B, dst, src);
+  }
+
   void or_(Register dst, const Operand& src) {
     arithmetic_op(0x0B, dst, src);
   }

@@ -860,6 +865,10 @@ class Assembler : public Malloced {
     arithmetic_op(0x33, dst, src);
   }

+  void xorl(Register dst, Register src) {
+    arithmetic_op_32(0x33, dst, src);
+  }
+
   void xor_(Register dst, const Operand& src) {
     arithmetic_op(0x33, dst, src);
   }

@@ -1049,7 +1058,9 @@ class Assembler : public Malloced {
   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
   // an instruction or relocation information.
-  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+  inline bool buffer_overflow() const {
+    return pc_ >= reloc_info_writer.pos() - kGap;
+  }

   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }

@@ -1279,7 +1290,7 @@ class Assembler : public Malloced {
 class EnsureSpace BASE_EMBEDDED {
  public:
   explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
-    if (assembler_->overflow()) assembler_->GrowBuffer();
+    if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
 #ifdef DEBUG
     space_before_ = assembler_->available_space();
 #endif
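buffer_overflow() and EnsureSpace form a small RAII protocol: every emit function constructs an EnsureSpace first, so the buffer is grown before any byte is written and the emit body never has to check. A generic sketch of the pattern under that assumption (simplified, not the Assembler itself):

    #include <cstdint>
    #include <vector>

    struct Emitter {
      std::vector<uint8_t> buf;
      static const size_t kGap = 16;  // headroom each emit may consume

      struct EnsureSpace {            // scope guard: grow before writing
        explicit EnsureSpace(Emitter* e) {
          if (e->buf.capacity() - e->buf.size() < kGap)
            e->buf.reserve(2 * e->buf.capacity() + kGap);  // "GrowBuffer"
        }
      };

      void Emit(uint8_t byte) {
        EnsureSpace ensure_space(this);
        buf.push_back(byte);          // guaranteed not to reallocate
      }
    };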

44
deps/v8/src/x64/builtins-x64.cc

@@ -61,8 +61,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   // Preserve the number of arguments on the stack. Must preserve both
   // rax and rbx because these registers are used when copying the
   // arguments and the receiver.
-  ASSERT(kSmiTagSize == 1);
-  __ lea(rcx, Operand(rax, rax, times_1, kSmiTag));
+  __ Integer32ToSmi(rcx, rax);
   __ push(rcx);
 }

@@ -77,10 +76,13 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
   // Remove caller arguments from the stack.
   // rbx holds a Smi, so we convery to dword offset by multiplying by 4.
+  // TODO(smi): Find a way to abstract indexing by a smi.
   ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
   ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
+  // TODO(smi): Find way to abstract indexing by a smi.
   __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize));  // 1 ~ receiver
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
   __ push(rcx);
 }

@@ -192,8 +194,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   { Label done, non_function, function;
     // The function to call is at position n+1 on the stack.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, +1 * kPointerSize));
-    __ testl(rdi, Immediate(kSmiTagMask));
-    __ j(zero, &non_function);
+    __ JumpIfSmi(rdi, &non_function);
     __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
     __ j(equal, &function);

@@ -213,8 +214,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   { Label call_to_object, use_global_receiver, patch_receiver, done;
     __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-    __ testl(rbx, Immediate(kSmiTagMask));
-    __ j(zero, &call_to_object);
+    __ JumpIfSmi(rbx, &call_to_object);
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
     __ j(equal, &use_global_receiver);

@@ -230,8 +230,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ EnterInternalFrame();  // preserves rax, rbx, rdi

     // Store the arguments count on the stack (smi tagged).
-    ASSERT(kSmiTag == 0);
-    __ shl(rax, Immediate(kSmiTagSize));
+    __ Integer32ToSmi(rax, rax);
     __ push(rax);

     __ push(rdi);  // save edi across the call

@@ -242,7 +241,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     // Get the arguments count and untag it.
     __ pop(rax);
-    __ shr(rax, Immediate(kSmiTagSize));
+    __ SmiToInteger32(rax, rax);
     __ LeaveInternalFrame();

     __ jmp(&patch_receiver);

@@ -355,8 +354,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   Label okay;
   // Make rdx the space we need for the array when it is unrolled onto the
   // stack.
-  __ movq(rdx, rax);
-  __ shl(rdx, Immediate(kPointerSizeLog2 - kSmiTagSize));
+  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
   __ cmpq(rcx, rdx);
   __ j(greater, &okay);

@@ -382,8 +380,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // Compute the receiver.
   Label call_to_object, use_global_receiver, push_receiver;
   __ movq(rbx, Operand(rbp, kReceiverOffset));
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(zero, &call_to_object);
+  __ JumpIfSmi(rbx, &call_to_object);
   __ CompareRoot(rbx, Heap::kNullValueRootIndex);
   __ j(equal, &use_global_receiver);
   __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);

@@ -446,7 +443,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // Invoke the function.
   ParameterCount actual(rax);
-  __ shr(rax, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rax, rax);
   __ movq(rdi, Operand(rbp, kFunctionOffset));
   __ InvokeFunction(rdi, actual, CALL_FUNCTION);

@@ -463,8 +460,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   Label non_function_call;
   // Check that function is not a smi.
-  __ testl(rdi, Immediate(kSmiTagMask));
-  __ j(zero, &non_function_call);
+  __ JumpIfSmi(rdi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &non_function_call);

@@ -492,7 +488,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ EnterConstructFrame();

   // Store a smi-tagged arguments count on the stack.
-  __ shl(rax, Immediate(kSmiTagSize));
+  __ Integer32ToSmi(rax, rax);
   __ push(rax);

   // Push the function to invoke on the stack.

@@ -517,8 +513,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // rdi: constructor
   __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
   // Will both indicate a NULL and a Smi
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &rt_call);
+  __ JumpIfSmi(rax, &rt_call);
   // rdi: constructor
   // rax: initial map (if proven valid below)
   __ CmpObjectType(rax, MAP_TYPE, rbx);

@@ -668,7 +663,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // Retrieve smi-tagged arguments count from the stack.
   __ movq(rax, Operand(rsp, 0));
-  __ shr(rax, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rax, rax);

   // Push the allocated receiver to the stack. We need two copies
   // because we may have to return the original one and the calling

@@ -701,8 +696,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // on page 74.
   Label use_receiver, exit;
   // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &use_receiver);
+  __ JumpIfSmi(rax, &use_receiver);

   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.

@@ -721,8 +715,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // Remove caller arguments from the stack and return.
   ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  // TODO(smi): Find a way to abstract indexing by a smi.
   __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rbx, times_4, 1 * kPointerSize));  // 1 ~ receiver
+  // 1 * kPointerSize is offset of receiver.
+  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
   __ push(rcx);
   __ IncrementCounter(&Counters::constructed_objects, 1);
   __ ret(0);
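times_half_pointer_size works because rbx still holds a tagged smi, i.e. 2 * count, so scaling by kPointerSize / 2 (4 on x64) yields count * kPointerSize with no explicit untag. The address arithmetic, spelled out:

    // rsp' = rsp + rbx * (kPointerSize / 2) + 1 * kPointerSize
    //      = rsp + (2 * count) * 4 + 8   // rbx is a smi: 2 * count
    //      = rsp + count * 8 + 8         // pops the arguments plus the receiver

This is why the old code could get away with times_4 and a terse "1 ~ receiver" note; the new form just names the intent.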

2
deps/v8/src/x64/cfg-x64.cc

@@ -112,12 +112,14 @@ void ExitNode::Compile(MacroAssembler* masm) {
   __ pop(rbp);
   int count = CfgGlobals::current()->fun()->scope()->num_parameters();
   __ ret((count + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Add padding that will be overwritten by a debugger breakpoint.
   // "movq rsp, rbp; pop rbp" has length 4.  "ret k" has length 3.
   const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
   for (int i = 0; i < kPadding; ++i) {
     __ int3();
   }
+#endif
 }

638
deps/v8/src/x64/codegen-x64.cc

File diff suppressed because it is too large

51
deps/v8/src/x64/ic-x64.cc

@@ -95,7 +95,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
       StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
   __ movq(r2, FieldOperand(r0, kCapacityOffset));
-  __ shrl(r2, Immediate(kSmiTagSize));  // convert smi to int
+  __ SmiToInteger32(r2, r2);
   __ decl(r2);

   // Generate an unrolled loop that performs a few probes before

@@ -132,7 +132,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
-           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+           Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
   __ j(not_zero, miss_label);

   // Get the value at the masked, scaled index.

@@ -148,8 +148,7 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
                                            Register value) {
   Label done;
   // Check if the value is a Smi.
-  __ testl(value, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  __ JumpIfSmi(value, &done);
   // Check if the object has been loaded.
   __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
   __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),

@@ -167,7 +166,7 @@ static bool PatchInlinedMapCheck(Address address, Object* map) {
   // Arguments are address of start of call sequence that called
   // the IC,
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
   if (*test_instruction_address != kTestEaxByte) return false;

@@ -265,8 +264,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ movq(rcx, Operand(rsp, 2 * kPointerSize));

   // Check that the object isn't a smi.
-  __ testl(rcx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rcx, &slow);

   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,

@@ -283,9 +281,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ j(not_zero, &slow);

   // Check that the key is a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(not_zero, &check_string);
-  __ sarl(rax, Immediate(kSmiTagSize));
+  __ JumpIfNotSmi(rax, &check_string);
+  __ SmiToInteger32(rax, rax);
   // Get the elements array of the object.
   __ bind(&index_int);
   __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));

@@ -410,8 +407,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
   // Check that the object isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &slow);
+  __ JumpIfSmi(rdx, &slow);
   // Get the map from the receiver.
   __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks. We need

@@ -422,8 +418,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // Get the key from the stack.
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ testl(rbx, Immediate(kSmiTagMask));
-  __ j(not_zero, &slow);
+  __ JumpIfNotSmi(rbx, &slow);
   // If it is a smi, make sure it is zero-extended, so it can be
   // used as an index in a memory operand.
   __ movl(rbx, rbx);  // Clear the high bits of rbx.

@@ -443,8 +438,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
   __ j(not_equal, &slow);
   // Untag the key (for checking against untagged length in the fixed array).
-  __ movl(rdx, rbx);
-  __ sarl(rdx, Immediate(kSmiTagSize));
+  __ SmiToInteger32(rdx, rbx);
   __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
   // rax: value
   // rcx: FixedArray

@@ -473,13 +467,13 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // rbx: index (as a smi)
   // flags: compare (rbx, rdx.length())
   __ j(not_equal, &slow);  // do not leave holes in the array
-  __ sarl(rbx, Immediate(kSmiTagSize));  // untag
+  __ SmiToInteger64(rbx, rbx);
   __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  // Restore tag and increment.
-  __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+  // Increment and restore smi-tag.
+  __ Integer64AddToSmi(rbx, rbx, 1);
   __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
-  __ subl(rbx, Immediate(1 << kSmiTagSize));  // decrement rbx again
+  __ SmiSubConstant(rbx, rbx, 1, NULL);
   __ jmp(&fast);

@@ -544,8 +538,7 @@ void CallIC::Generate(MacroAssembler* masm,
   // Check if the receiver is a global object of some sort.
   Label invoke, global;
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));  // receiver
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &invoke);
+  __ JumpIfSmi(rdx, &invoke);
   __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
   __ j(equal, &global);
   __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);

@@ -594,8 +587,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // to probe.
   //
   // Check for number.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &number);
+  __ JumpIfSmi(rdx, &number);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
   __ j(not_equal, &non_number);
   __ bind(&number);

@@ -640,8 +632,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
   // Move the result to register rdi and check that it isn't a smi.
   __ movq(rdi, rdx);
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, miss);
+  __ JumpIfSmi(rdx, miss);

   // Check that the value is a JavaScript function.
   __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);

@@ -683,8 +674,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));

   // Check that the receiver isn't a smi.
-  __ testl(rdx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rdx, &miss);

   // Check that the receiver is a valid JS object.
   // Because there are so many map checks and type checks, do not

@@ -844,8 +834,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   __ movq(rax, Operand(rsp, kPointerSize));

   // Check that the receiver isn't a smi.
-  __ testl(rax, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
+  __ JumpIfSmi(rax, &miss);

   // Check that the receiver is a valid JS object.
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);

@@ -902,7 +891,7 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address =
-      address + Assembler::kPatchReturnSequenceLength;
+      address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;

750
deps/v8/src/x64/macro-assembler-x64.cc

@ -412,6 +412,729 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
} }
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
#ifdef DEBUG
cmpq(src, Immediate(0xC0000000u));
Check(positive, "Smi conversion overflow");
#endif
if (dst.is(src)) {
addl(dst, src);
} else {
lea(dst, Operand(src, src, times_1, 0));
}
}
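Both paths compute dst = src + src: because the tag is a single zero bit, doubling a 32-bit value is exactly tagging it; addl sets the flags (which the overflow-checking variant below relies on) while lea leaves them untouched. A sketch of the equivalence (illustrative, not the V8 API):

    #include <cassert>
    #include <cstdint>

    int32_t TagByAdd(int32_t v) { return v + v; }   // addl dst, src
    int32_t TagByLea(int32_t v) { return 2 * v; }   // lea dst, [src + src*1]

    int main() {
      const int32_t samples[] = {0, 1, -1, 0x3FFFFFFF, -0x40000000};
      for (int i = 0; i < 5; i++)
        assert(TagByAdd(samples[i]) == TagByLea(samples[i]));
      return 0;
    }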
void MacroAssembler::Integer32ToSmi(Register dst,
Register src,
Label* on_overflow) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
movl(dst, src);
}
addl(dst, src);
j(overflow, on_overflow);
}
void MacroAssembler::Integer64AddToSmi(Register dst,
Register src,
int constant) {
#ifdef DEBUG
movl(kScratchRegister, src);
addl(kScratchRegister, Immediate(constant));
Check(no_overflow, "Add-and-smi-convert overflow");
Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
Check(valid, "Add-and-smi-convert overflow");
#endif
lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
}
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
movl(dst, src);
}
sarl(dst, Immediate(kSmiTagSize));
}
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
movsxlq(dst, src);
sar(dst, Immediate(kSmiTagSize));
}
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
ASSERT(power >= 0);
ASSERT(power < 64);
if (power == 0) {
SmiToInteger64(dst, src);
return;
}
movsxlq(dst, src);
shl(dst, Immediate(power - 1));
}
void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(kSmiTagMask));
j(zero, on_smi);
}
void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
Condition not_smi = CheckNotSmi(src);
j(not_smi, on_not_smi);
}
void MacroAssembler::JumpIfNotPositiveSmi(Register src,
Label* on_not_positive_smi) {
Condition not_positive_smi = CheckNotPositiveSmi(src);
j(not_positive_smi, on_not_positive_smi);
}
void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
int constant,
Label* on_equals) {
if (Smi::IsValid(constant)) {
Condition are_equal = CheckSmiEqualsConstant(src, constant);
j(are_equal, on_equals);
}
}
void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
Condition is_valid = CheckInteger32ValidSmiValue(src);
j(ReverseCondition(is_valid), on_invalid);
}
void MacroAssembler::JumpIfNotBothSmi(Register src1,
Register src2,
Label* on_not_both_smi) {
Condition not_both_smi = CheckNotBothSmi(src1, src2);
j(not_both_smi, on_not_both_smi);
}
Condition MacroAssembler::CheckSmi(Register src) {
testb(src, Immediate(kSmiTagMask));
return zero;
}
Condition MacroAssembler::CheckNotSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
return not_zero;
}
Condition MacroAssembler::CheckPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
return zero;
}
Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
return not_zero;
}
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
if (first.is(second)) {
return CheckSmi(first);
}
movl(kScratchRegister, first);
orl(kScratchRegister, second);
return CheckSmi(kScratchRegister);
}
Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
ASSERT_EQ(0, kSmiTag);
if (first.is(second)) {
return CheckNotSmi(first);
}
movl(kScratchRegister, first);
or_(kScratchRegister, second);
return CheckNotSmi(kScratchRegister);
}
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
cmpl(src, Immediate(0x40000000));
return equal;
}
Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
if (constant == 0) {
testl(src, src);
return zero;
}
if (Smi::IsValid(constant)) {
cmpl(src, Immediate(Smi::FromInt(constant)));
return zero;
}
// Can't be equal.
UNREACHABLE();
return no_condition;
}
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
// A 32-bit integer value can be converted to a smi if it is in the
// range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
// representation have bits 30 and 31 be equal.
cmpl(src, Immediate(0xC0000000u));
return positive;
}
void MacroAssembler::SmiNeg(Register dst,
Register src,
Label* on_not_smi_result) {
if (!dst.is(src)) {
movl(dst, src);
}
negl(dst);
testl(dst, Immediate(0x7fffffff));
// If the result is zero or 0x80000000, negation failed to create a smi.
j(equal, on_not_smi_result);
}
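The single test covers both ways negation can leave the smi range: negating the zero smi would have to produce -0 (a heap number, not a smi), and negating the minimum smi 0x80000000 wraps back to itself. After negl, those are exactly the words whose low 31 bits are clear, hence the mask. A worked check of that claim:

    #include <cassert>
    #include <cstdint>

    // True iff the negated tagged word is 0 or 0x80000000.
    bool NegationFailed(uint32_t negated) {
      return (negated & 0x7fffffffu) == 0;
    }

    int main() {
      assert(NegationFailed(0u));                          // -0: not a smi
      assert(NegationFailed(0x80000000u));                 // min smi: wraps
      assert(!NegationFailed(static_cast<uint32_t>(-8)));  // -(smi 4): fine
      return 0;
    }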
void MacroAssembler::SmiAdd(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
movl(dst, src1);
}
addl(dst, src2);
if (!dst.is(src1)) {
j(overflow, on_not_smi_result);
} else {
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
subl(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
}
}
void MacroAssembler::SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
if (!dst.is(src1)) {
movl(dst, src1);
}
subl(dst, src2);
if (!dst.is(src1)) {
j(overflow, on_not_smi_result);
} else {
Label smi_result;
j(no_overflow, &smi_result);
// Restore src1.
addl(src1, src2);
jmp(on_not_smi_result);
bind(&smi_result);
}
}
void MacroAssembler::SmiMul(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(src2));
if (dst.is(src1)) {
movq(kScratchRegister, src1);
}
SmiToInteger32(dst, src1);
imull(dst, src2);
j(overflow, on_not_smi_result);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case. The frame is unchanged
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
Label non_zero_result;
testl(dst, dst);
j(not_zero, &non_zero_result);
// Test whether either operand is negative (the other must be zero).
orl(kScratchRegister, src2);
j(negative, on_not_smi_result);
bind(&non_zero_result);
}
void MacroAssembler::SmiTryAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result) {
// Does not assume that src is a smi.
ASSERT_EQ(1, kSmiTagMask);
ASSERT_EQ(0, kSmiTag);
ASSERT(Smi::IsValid(constant));
Register tmp = (src.is(dst) ? kScratchRegister : dst);
movl(tmp, src);
addl(tmp, Immediate(Smi::FromInt(constant)));
if (tmp.is(kScratchRegister)) {
j(overflow, on_not_smi_result);
testl(tmp, Immediate(kSmiTagMask));
j(not_zero, on_not_smi_result);
movl(dst, tmp);
} else {
movl(kScratchRegister, Immediate(kSmiTagMask));
cmovl(overflow, dst, kScratchRegister);
testl(dst, kScratchRegister);
j(not_zero, on_not_smi_result);
}
}
void MacroAssembler::SmiAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result) {
ASSERT(Smi::IsValid(constant));
if (on_not_smi_result == NULL) {
if (dst.is(src)) {
movl(dst, src);
} else {
lea(dst, Operand(src, constant << kSmiTagSize));
}
} else {
if (!dst.is(src)) {
movl(dst, src);
}
addl(dst, Immediate(Smi::FromInt(constant)));
if (!dst.is(src)) {
j(overflow, on_not_smi_result);
} else {
Label result_ok;
j(no_overflow, &result_ok);
subl(dst, Immediate(Smi::FromInt(constant)));
jmp(on_not_smi_result);
bind(&result_ok);
}
}
}
void MacroAssembler::SmiSubConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result) {
ASSERT(Smi::IsValid(constant));
Smi* smi_value = Smi::FromInt(constant);
if (dst.is(src)) {
// Optimistic subtract - may change value of dst register,
// if it has garbage bits in the higher half, but will not change
// the value as a tagged smi.
subl(dst, Immediate(smi_value));
if (on_not_smi_result != NULL) {
Label add_success;
j(no_overflow, &add_success);
addl(dst, Immediate(smi_value));
jmp(on_not_smi_result);
bind(&add_success);
}
} else {
UNIMPLEMENTED(); // Not used yet.
}
}
void MacroAssembler::SmiDiv(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!src2.is(rax));
ASSERT(!src2.is(rdx));
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
Label positive_divisor;
testl(src2, src2);
j(zero, on_not_smi_result);
j(positive, &positive_divisor);
// Check for negative zero result. If the dividend is zero, and the
// divisor is negative, return a floating point negative zero.
testl(src1, src1);
j(zero, on_not_smi_result);
bind(&positive_divisor);
// Sign extend src1 into edx:eax.
if (!src1.is(rax)) {
movl(rax, src1);
}
cdq();
idivl(src2);
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
cmpl(rax, Immediate(0x40000000));
j(equal, on_not_smi_result);
// Check that the remainder is zero.
testl(rdx, rdx);
j(not_zero, on_not_smi_result);
// Tag the result and store it in the destination register.
Integer32ToSmi(dst, rax);
}
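The 0x40000000 comparison is the one overflow idivl cannot flag: both tagged operands are even, so rax holds the true integer quotient, and the only exact smi-by-smi division that escapes the value range [-2^30, 2^30 - 1] is min-smi divided by -1. Numerically:

    // tagged min smi  = 0x80000000   (value -2^30, word -2^31)
    // (-2^31) / (-2)  = 2^30         = 0x40000000 in rax
    // largest smi value = 2^30 - 1   = 0x3FFFFFFF,
    // so rax == 0x40000000 is rejected before tagging.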
void MacroAssembler::SmiMod(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(kScratchRegister));
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
ASSERT(!src2.is(rax));
ASSERT(!src2.is(rdx));
ASSERT(!src1.is(rdx));
testl(src2, src2);
j(zero, on_not_smi_result);
if (src1.is(rax)) {
// Must remember the value to see if a zero result should
// be a negative zero.
movl(kScratchRegister, rax);
} else {
movl(rax, src1);
}
// Sign extend eax into edx:eax.
cdq();
idivl(src2);
// Check for a negative zero result. If the result is zero, and the
// dividend is negative, return a floating point negative zero.
Label non_zero_result;
testl(rdx, rdx);
j(not_zero, &non_zero_result);
if (src1.is(rax)) {
testl(kScratchRegister, kScratchRegister);
} else {
testl(src1, src1);
}
j(negative, on_not_smi_result);
bind(&non_zero_result);
if (!dst.is(rdx)) {
movl(dst, rdx);
}
}
void MacroAssembler::SmiNot(Register dst, Register src) {
if (dst.is(src)) {
not_(dst);
// Remove inverted smi-tag. The mask is sign-extended to 64 bits.
xor_(src, Immediate(kSmiTagMask));
} else {
ASSERT_EQ(0, kSmiTag);
lea(dst, Operand(src, kSmiTagMask));
not_(dst);
}
}
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
and_(dst, src2);
}
void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
ASSERT(Smi::IsValid(constant));
if (!dst.is(src)) {
movl(dst, src);
}
and_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
or_(dst, src2);
}
void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
ASSERT(Smi::IsValid(constant));
if (!dst.is(src)) {
movl(dst, src);
}
or_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movl(dst, src1);
}
xor_(dst, src2);
}
void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
ASSERT(Smi::IsValid(constant));
if (!dst.is(src)) {
movl(dst, src);
}
xor_(dst, Immediate(Smi::FromInt(constant)));
}
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
if (shift_value > 0) {
if (dst.is(src)) {
sarl(dst, Immediate(shift_value));
and_(dst, Immediate(~kSmiTagMask));
} else {
UNIMPLEMENTED(); // Not used.
}
}
}
void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result) {
// Logic right shift interprets its result as an *unsigned* number.
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
movl(dst, src);
// Untag the smi.
sarl(dst, Immediate(kSmiTagSize));
if (shift_value < 2) {
// A negative Smi shifted right two is in the positive Smi range,
// but if shifted only by zero or one, it never is.
j(negative, on_not_smi_result);
}
if (shift_value > 0) {
// Do the right shift on the integer value.
shrl(dst, Immediate(shift_value));
}
// Re-tag the result.
addl(dst, dst);
}
}
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result) {
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
movl(dst, src);
if (shift_value > 0) {
// Treat dst as an untagged integer value equal to two times the
// smi value of src, i.e., already shifted left by one.
if (shift_value > 1) {
shll(dst, Immediate(shift_value - 1));
}
// Convert int result to Smi, checking that it is in smi range.
ASSERT(kSmiTagSize == 1); // adjust code if not the case
Integer32ToSmi(dst, dst, on_not_smi_result);
}
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag both operands.
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
shll(dst);
// Check that the *signed* result fits in a smi.
Condition is_valid = CheckInteger32ValidSmiValue(dst);
j(is_valid, &result_ok);
// Restore the relevant bits of the source registers
// and call the slow version.
if (dst.is(src1)) {
shrl(dst);
Integer32ToSmi(dst, dst);
}
Integer32ToSmi(rcx, rcx);
jmp(on_not_smi_result);
bind(&result_ok);
Integer32ToSmi(dst, dst);
}
void MacroAssembler::SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result) {
ASSERT(!dst.is(rcx));
Label result_ok;
// Untag both operands.
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
shrl(dst);
// Check that the *unsigned* result fits in a smi.
// I.e., that it is a valid positive smi value. The positive smi
// values are 0..0x3fffffff, i.e., neither of the top-most two
// bits can be set.
//
// These two cases can only happen with shifts by 0 or 1 when
// handed a valid smi. If the answer cannot be represented by a
// smi, restore the left and right arguments, and jump to slow
// case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway.
testl(dst, Immediate(0xc0000000));
j(zero, &result_ok);
if (dst.is(src1)) {
shll(dst);
Integer32ToSmi(dst, dst);
}
Integer32ToSmi(rcx, rcx);
jmp(on_not_smi_result);
bind(&result_ok);
// Smi-tag the result in answer.
Integer32ToSmi(dst, dst);
}
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
ASSERT(!dst.is(rcx));
// Untag both operands.
SmiToInteger32(dst, src1);
SmiToInteger32(rcx, src2);
// Shift as integer.
sarl(dst);
// Retag result.
Integer32ToSmi(dst, dst);
}
void MacroAssembler::SelectNonSmi(Register dst,
Register src1,
Register src2,
Label* on_not_smis) {
ASSERT(!dst.is(src1));
ASSERT(!dst.is(src2));
// Both operands must not be smis.
#ifdef DEBUG
Condition not_both_smis = CheckNotBothSmi(src1, src2);
Check(not_both_smis, "Both registers were smis.");
#endif
ASSERT_EQ(0, kSmiTag);
ASSERT_EQ(0, Smi::FromInt(0));
movq(kScratchRegister, Immediate(kSmiTagMask));
and_(kScratchRegister, src1);
testl(kScratchRegister, src2);
j(not_zero, on_not_smis);
// One operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
subq(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
movq(dst, src1);
xor_(dst, src2);
and_(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
xor_(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
}
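The comment trail above compresses a neat branch-free select: (src1 & kSmiTagMask) - 1 is all ones exactly when src1 is the smi, and x ^ ((x ^ y) & mask) yields y under an all-ones mask and x under zero. The same trick in plain C++ (a sketch; precondition as in the DEBUG check):

    #include <cassert>
    #include <cstdint>

    // Select the non-smi of two words, exactly one of which has tag bit 0.
    uintptr_t SelectNonSmi(uintptr_t a, uintptr_t b) {
      uintptr_t mask = (a & 1) - 1;  // a is smi -> ~0; a is heap object -> 0
      return ((a ^ b) & mask) ^ a;   // ~0 selects b, 0 selects a
    }

    int main() {
      uintptr_t smi = 0x10;  // tag bit clear
      uintptr_t obj = 0x21;  // tag bit set (heap object)
      assert(SelectNonSmi(smi, obj) == obj);
      assert(SelectNonSmi(obj, smi) == obj);
      return 0;
    }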
SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
ASSERT(is_uint6(shift));
if (shift == 0) { // times_1.
SmiToInteger32(dst, src);
return SmiIndex(dst, times_1);
}
if (shift <= 4) { // 2 - 16 times multiplier is handled using ScaleFactor.
// We expect that all smis are actually zero-padded. If this holds after
// checking, this line can be omitted.
movl(dst, src); // Ensure that the smi is zero-padded.
return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
}
// Shift by shift-kSmiTagSize.
movl(dst, src); // Ensure that the smi is zero-padded.
shl(dst, Immediate(shift - kSmiTagSize));
return SmiIndex(dst, times_1);
}
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
// Register src holds a positive smi.
ASSERT(is_uint6(shift));
if (shift == 0) { // times_1.
SmiToInteger32(dst, src);
neg(dst);
return SmiIndex(dst, times_1);
}
if (shift <= 4) { // 2 - 16 times multiplier is handled using ScaleFactor.
movl(dst, src);
neg(dst);
return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
}
// Shift by shift-kSmiTagSize.
movl(dst, src);
neg(dst);
shl(dst, Immediate(shift - kSmiTagSize));
return SmiIndex(dst, times_1);
}
bool MacroAssembler::IsUnsafeSmi(Smi* value) {
  return false;
}
@@ -520,7 +1243,7 @@ void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
 #endif
   jmp(kScratchRegister);
 #ifdef DEBUG
-  ASSERT_EQ(kPatchReturnSequenceLength,
+  ASSERT_EQ(kCallTargetAddressOffset,
             SizeOfCodeGeneratedSince(&target) + kPointerSize);
 #endif
 }
@@ -549,7 +1272,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
 #endif
   call(kScratchRegister);
 #ifdef DEBUG
-  ASSERT_EQ(kPatchReturnSequenceLength,
+  ASSERT_EQ(kCallTargetAddressOffset,
             SizeOfCodeGeneratedSince(&target) + kPointerSize);
 #endif
 }
@@ -599,7 +1322,7 @@ void MacroAssembler::Ret() {
 void MacroAssembler::FCmp() {
-  fcompp();
+  fucompp();
   push(rax);
   fnstsw_ax();
   if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
@@ -821,7 +1544,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
         Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry =
-        { pc_offset() - kPatchReturnSequenceLength, flags, name };
+        { pc_offset() - kCallTargetAddressOffset, flags, name };
     unresolved_.Add(entry);
   }
 }
@@ -1406,4 +2129,23 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
 }

+CodePatcher::CodePatcher(byte* address, int size)
+    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  CPU::FlushICache(address_, size_);
+
+  // Check that the code was patched as expected.
+  ASSERT(masm_.pc_ == address_ + size_);
+  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
 } }  // namespace v8::internal
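CodePatcher pairs naturally with the PatchCodeWithCall rewrite earlier in this commit: open a patcher over the bytes to rewrite, emit through masm(), and let the destructor flush the instruction cache and assert the exact byte count. A hedged sketch of a call site (PatchWithInt3 is hypothetical):

    // Overwrite 'size' bytes at 'site' with int3 breakpoints.
    void PatchWithInt3(byte* site, int size) {
      CodePatcher patcher(site, size);
      for (int i = 0; i < size; i++) {
        patcher.masm()->int3();  // one byte per iteration
      }
      // ~CodePatcher runs here: flush the icache, ASSERT exactly 'size' bytes.
    }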

251
deps/v8/src/x64/macro-assembler-x64.h

@@ -41,6 +41,13 @@ static const Register kScratchRegister = r10;

 // Forward declaration.
 class JumpTarget;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
: reg(index_register),
scale(scale) {}
Register reg;
ScaleFactor scale;
};
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {

@@ -126,6 +133,230 @@ class MacroAssembler: public Assembler {
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
// Only uses the low 32 bits of the src register.
void Integer32ToSmi(Register dst, Register src);
// Tag an integer value if possible, or jump if the integer value cannot be
// represented as a smi. Only uses the low 32 bits of the src register.
void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
// Adds constant to src and tags the result as a smi.
// Result must be a valid smi.
void Integer64AddToSmi(Register dst, Register src, int constant);
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
// Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value.
void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power);
// Functions performing a check on a known or potential smi. Returns
// a condition that is satisfied if the check is successful.
// Is the value a tagged smi.
Condition CheckSmi(Register src);
// Is the value not a tagged smi.
Condition CheckNotSmi(Register src);
// Is the value a positive tagged smi.
Condition CheckPositiveSmi(Register src);
// Is the value not a positive tagged smi.
Condition CheckNotPositiveSmi(Register src);
// Are both values tagged smis.
Condition CheckBothSmi(Register first, Register second);
// Is one of the values not a tagged smi.
Condition CheckNotBothSmi(Register first, Register second);
// Is the value the minimum smi value (since we are using
// two's complement numbers, negating the value is known to yield
// a non-smi value).
Condition CheckIsMinSmi(Register src);
// Check whether a tagged smi is equal to a constant.
Condition CheckSmiEqualsConstant(Register src, int constant);
// Checks whether a 32-bit integer value is valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
// Jump to label if the value is a tagged smi.
void JumpIfSmi(Register src, Label* on_smi);
// Jump to label if the value is not a tagged smi.
void JumpIfNotSmi(Register src, Label* on_not_smi);
// Jump to label if the value is not a positive tagged smi.
void JumpIfNotPositiveSmi(Register src, Label* on_not_positive_smi);
// Jump to label if the value is a tagged smi with value equal
// to the constant.
void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
// Jump if either or both register are not smi values.
void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
// Operations on tagged smi values.
// Smis represent a subset of integers. The subset is always equivalent to
// a two's complement interpretation of a fixed number of bits.
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
void SmiTryAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result);
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
// If the label is NULL, no testing on the result is done.
void SmiAddConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
// If the label is NULL, no testing on the result is done.
void SmiSubConstant(Register dst,
Register src,
int32_t constant,
Label* on_not_smi_result);
// Negating a smi can give a negative zero or too large positive value.
void SmiNeg(Register dst,
Register src,
Label* on_not_smi_result);
// Adds smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
void SmiAdd(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Subtracts smi values and return the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
void SmiSub(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Multiplies smi values and returns the result as a smi,
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
void SmiMul(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
void SmiDiv(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
void SmiMod(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Bitwise operations.
void SmiNot(Register dst, Register src);
void SmiAnd(Register dst, Register src1, Register src2);
void SmiOr(Register dst, Register src1, Register src2);
void SmiXor(Register dst, Register src1, Register src2);
void SmiAndConstant(Register dst, Register src1, int constant);
void SmiOrConstant(Register dst, Register src1, int constant);
void SmiXorConstant(Register dst, Register src1, int constant);
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result);
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
Label* on_not_smi_result);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
// Shifts a smi value to the left, and returns the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLeft(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
Label* on_not_smi_result);
// Shifts a smi value to the right, sign-extending the top, and
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
// original.
// Uses and clobbers rcx, so dst may not be rcx.
void SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2);
// Specialized operations.
// Select the non-smi register of two registers where exactly one is a
// smi. If neither is a smi, jump to the failure label.
void SelectNonSmi(Register dst,
Register src1,
Register src2,
Label* on_not_smis);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
// The src register contains a *positive* smi value. The shift is the
// power of two to multiply the index value by (e.g.
// to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
// The returned index register may be either src or dst, depending
// on what is most efficient. If src and dst are different registers,
// src is always unchanged.
SmiIndex SmiToIndex(Register dst, Register src, int shift);
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
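SmiToIndex is what lets the stub cache below replace its hand-scaled times_4 operands; usage is roughly as follows (a sketch, with table_base standing in for the actual base register):
// 'offset' holds a positive smi used to index pointer-sized entries.
SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
__ movq(kScratchRegister, Operand(table_base, index.reg, index.scale, 0));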
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Macro instructions // Macro instructions
@ -380,6 +611,26 @@ class MacroAssembler: public Assembler {
}; };
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
public:
CodePatcher(byte* address, int size);
virtual ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes to be patched.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
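Typical use is scoped so that the destructor can verify the promised byte count; schematically (a sketch; the int3 patch is just an example):
{
  // Overwrite two bytes at 'address' with breakpoint instructions.
  CodePatcher patcher(address, 2);
  patcher.masm()->int3();
  patcher.masm()->int3();
}  // ~CodePatcher asserts that exactly 2 bytes were emitted.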
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Static helper functions. // Static helper functions.

62
deps/v8/src/x64/stub-cache-x64.cc

@ -47,17 +47,19 @@ static void ProbeTable(MacroAssembler* masm,
StubCache::Table table, StubCache::Table table,
Register name, Register name,
Register offset) { Register offset) {
// The offset register must hold a *positive* smi.
ExternalReference key_offset(SCTableReference::keyReference(table)); ExternalReference key_offset(SCTableReference::keyReference(table));
Label miss; Label miss;
__ movq(kScratchRegister, key_offset); __ movq(kScratchRegister, key_offset);
SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
// Check that the key in the entry matches the name. // Check that the key in the entry matches the name.
__ cmpl(name, Operand(kScratchRegister, offset, times_4, 0)); __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
__ j(not_equal, &miss); __ j(not_equal, &miss);
// Get the code entry from the cache. // Get the code entry from the cache.
// Use key_offset + kPointerSize, rather than loading value_offset. // Use key_offset + kPointerSize, rather than loading value_offset.
__ movq(kScratchRegister, __ movq(kScratchRegister,
Operand(kScratchRegister, offset, times_4, kPointerSize)); Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
// Check that the flags match what we're looking for. // Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset)); __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup)); __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
@ -163,8 +165,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
ASSERT(!scratch.is(name)); ASSERT(!scratch.is(name));
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, &miss);
__ j(zero, &miss);
// Get the map of the receiver and compute the hash. // Get the map of the receiver and compute the hash.
__ movl(scratch, FieldOperand(name, String::kLengthOffset)); __ movl(scratch, FieldOperand(name, String::kLengthOffset));
@ -204,8 +205,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register scratch, Register scratch,
Label* miss_label) { Label* miss_label) {
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ testl(receiver_reg, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver_reg, miss_label);
__ j(zero, miss_label);
// Check that the map of the object hasn't changed. // Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset), __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
@ -275,8 +275,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register scratch, Register scratch,
Label* miss_label) { Label* miss_label) {
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, miss_label);
__ j(zero, miss_label);
// Check that the object is a JS array. // Check that the object is a JS array.
__ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
@ -296,8 +295,7 @@ static void GenerateStringCheck(MacroAssembler* masm,
Label* smi, Label* smi,
Label* non_string_object) { Label* non_string_object) {
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, smi);
__ j(zero, smi);
// Check that the object is a string. // Check that the object is a string.
__ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
@ -325,7 +323,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// rcx is also the receiver. // rcx is also the receiver.
__ lea(rcx, Operand(scratch, String::kLongLengthShift)); __ lea(rcx, Operand(scratch, String::kLongLengthShift));
__ shr(rax); // rcx is implicit shift register. __ shr(rax); // rcx is implicit shift register.
__ shl(rax, Immediate(kSmiTagSize)); __ Integer32ToSmi(rax, rax);
__ ret(0); __ ret(0);
// Check if the object is a JSValue wrapper. // Check if the object is a JSValue wrapper.
@ -535,8 +533,7 @@ static void CompileLoadInterceptor(Compiler* compiler,
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, miss);
__ j(zero, miss);
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
@ -701,8 +698,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) { if (check != NUMBER_CHECK) {
__ testl(rdx, Immediate(kSmiTagMask)); __ JumpIfSmi(rdx, &miss);
__ j(zero, &miss);
} }
// Make sure that it's okay not to patch the on stack receiver // Make sure that it's okay not to patch the on stack receiver
@ -738,8 +734,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case NUMBER_CHECK: { case NUMBER_CHECK: {
Label fast; Label fast;
// Check that the object is a smi or a heap number. // Check that the object is a smi or a heap number.
__ testl(rdx, Immediate(kSmiTagMask)); __ JumpIfSmi(rdx, &fast);
__ j(zero, &fast);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx); __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &miss); __ j(not_equal, &miss);
__ bind(&fast); __ bind(&fast);
@ -830,8 +825,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(rdx, Immediate(kSmiTagMask)); __ JumpIfSmi(rdx, &miss);
__ j(zero, &miss);
// Do the right check and compute the holder register. // Do the right check and compute the holder register.
Register reg = Register reg =
@ -841,8 +835,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
GenerateFastPropertyLoad(masm(), rdi, reg, holder, index); GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
// Check that the function really is a function. // Check that the function really is a function.
__ testl(rdi, Immediate(kSmiTagMask)); __ JumpIfSmi(rdi, &miss);
__ j(zero, &miss);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx); __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &miss); __ j(not_equal, &miss);
@ -899,8 +892,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Check that the function really is a function. // Check that the function really is a function.
__ testl(rax, Immediate(kSmiTagMask)); __ JumpIfSmi(rax, &miss);
__ j(zero, &miss);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx); __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
__ j(not_equal, &miss); __ j(not_equal, &miss);
@ -952,8 +944,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// object which can only happen for contextual calls. In this case, // object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi. // the receiver cannot be a smi.
if (object != holder) { if (object != holder) {
__ testl(rdx, Immediate(kSmiTagMask)); __ JumpIfSmi(rdx, &miss);
__ j(zero, &miss);
} }
// Check that the maps haven't changed. // Check that the maps haven't changed.
@ -1112,8 +1103,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// object which can only happen for contextual loads. In this case, // object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi. // the receiver cannot be a smi.
if (object != holder) { if (object != holder) {
__ testl(rax, Immediate(kSmiTagMask)); __ JumpIfSmi(rax, &miss);
__ j(zero, &miss);
} }
// Check that the maps haven't changed. // Check that the maps haven't changed.
@ -1335,8 +1325,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); __ movq(rbx, Operand(rsp, 1 * kPointerSize));
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ testl(rbx, Immediate(kSmiTagMask)); __ JumpIfSmi(rbx, &miss);
__ j(zero, &miss);
// Check that the map of the object hasn't changed. // Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@ -1424,8 +1413,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
__ movq(rbx, Operand(rsp, 1 * kPointerSize)); __ movq(rbx, Operand(rsp, 1 * kPointerSize));
// Check that the object isn't a smi. // Check that the object isn't a smi.
__ testl(rbx, Immediate(kSmiTagMask)); __ JumpIfSmi(rbx, &miss);
__ j(zero, &miss);
// Check that the map of the object hasn't changed. // Check that the map of the object hasn't changed.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
@ -1631,8 +1619,7 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
String* name, String* name,
Label* miss) { Label* miss) {
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, miss);
__ j(zero, miss);
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
@ -1701,8 +1688,7 @@ void StubCompiler::GenerateLoadField(JSObject* object,
String* name, String* name,
Label* miss) { Label* miss) {
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, miss);
__ j(zero, miss);
// Check the prototype chain. // Check the prototype chain.
Register reg = Register reg =
@ -1724,8 +1710,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
String* name, String* name,
Label* miss) { Label* miss) {
// Check that the receiver isn't a smi. // Check that the receiver isn't a smi.
__ testl(receiver, Immediate(kSmiTagMask)); __ JumpIfSmi(receiver, miss);
__ j(zero, miss);
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
@ -1766,8 +1751,7 @@ Object* ConstructStubCompiler::CompileConstructStub(
// Load the initial map and verify that it is in fact a map. // Load the initial map and verify that it is in fact a map.
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi. // Will both indicate a NULL and a Smi.
__ testq(rbx, Immediate(kSmiTagMask)); __ JumpIfSmi(rbx, &generic_stub_call);
__ j(zero, &generic_stub_call);
__ CmpObjectType(rbx, MAP_TYPE, rcx); __ CmpObjectType(rbx, MAP_TYPE, rcx);
__ j(not_equal, &generic_stub_call); __ j(not_equal, &generic_stub_call);

4
deps/v8/src/x64/virtual-frame-x64.cc

@ -65,8 +65,8 @@ void VirtualFrame::Enter() {
#ifdef DEBUG #ifdef DEBUG
// Verify that rdi contains a JS function. The following code // Verify that rdi contains a JS function. The following code
// relies on rax being available for use. // relies on rax being available for use.
__ testl(rdi, Immediate(kSmiTagMask)); Condition not_smi = masm()->CheckNotSmi(rdi);
__ Check(not_zero, __ Check(not_smi,
"VirtualFrame::Enter - rdi is not a function (smi check)."); "VirtualFrame::Enter - rdi is not a function (smi check).");
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax); __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ Check(equal, __ Check(equal,

2
deps/v8/test/cctest/cctest.status

@ -36,8 +36,6 @@ test-api/ApplyInterruption: PASS || TIMEOUT
[ $arch == arm ] [ $arch == arm ]
test-debug: SKIP
# BUG(113): Test seems flaky on ARM. # BUG(113): Test seems flaky on ARM.
test-spaces/LargeObjectSpace: PASS || FAIL test-spaces/LargeObjectSpace: PASS || FAIL

8
deps/v8/test/cctest/test-conversions.cc

@ -91,13 +91,15 @@ TEST(NonStrDecimalLiteral) {
CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS)); CHECK_EQ(0.0, StringToDouble(" ", NO_FLAGS));
} }
class OneBit1: public BitField<uint32_t, 0, 1> {};
class OneBit2: public BitField<uint32_t, 7, 1> {};
class EightBit1: public BitField<uint32_t, 0, 8> {};
class EightBit2: public BitField<uint32_t, 13, 8> {};
TEST(BitField) { TEST(BitField) {
uint32_t x; uint32_t x;
// One bit bit field can hold values 0 and 1. // One bit bit field can hold values 0 and 1.
class OneBit1: public BitField<uint32_t, 0, 1> {};
class OneBit2: public BitField<uint32_t, 7, 1> {};
CHECK(!OneBit1::is_valid(static_cast<uint32_t>(-1))); CHECK(!OneBit1::is_valid(static_cast<uint32_t>(-1)));
CHECK(!OneBit2::is_valid(static_cast<uint32_t>(-1))); CHECK(!OneBit2::is_valid(static_cast<uint32_t>(-1)));
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
@ -113,8 +115,6 @@ TEST(BitField) {
CHECK(!OneBit2::is_valid(2)); CHECK(!OneBit2::is_valid(2));
// Eight bit bit field can hold values from 0 to 255. // Eight bit bit field can hold values from 0 to 255.
class EightBit1: public BitField<uint32_t, 0, 8> {};
class EightBit2: public BitField<uint32_t, 13, 8> {};
CHECK(!EightBit1::is_valid(static_cast<uint32_t>(-1))); CHECK(!EightBit1::is_valid(static_cast<uint32_t>(-1)));
CHECK(!EightBit2::is_valid(static_cast<uint32_t>(-1))); CHECK(!EightBit2::is_valid(static_cast<uint32_t>(-1)));
for (int i = 0; i < 256; i++) { for (int i = 0; i < 256; i++) {

7
deps/v8/test/cctest/test-debug.cc

@ -2301,13 +2301,8 @@ TEST(DebugStepLinearMixedICs) {
break_point_hit_count = 0; break_point_hit_count = 0;
foo->Call(env->Global(), 0, NULL); foo->Call(env->Global(), 0, NULL);
// With stepping all break locations are hit. For ARM the keyed load/store // With stepping all break locations are hit.
// is not hit as they are not implemented as ICs.
#if defined (__arm__) || defined(__thumb__)
CHECK_EQ(6, break_point_hit_count);
#else
CHECK_EQ(8, break_point_hit_count); CHECK_EQ(8, break_point_hit_count);
#endif
v8::Debug::SetDebugEventListener(NULL); v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded(); CheckDebuggerUnloaded();

28
deps/v8/test/cctest/test-strings.cc

@ -48,6 +48,21 @@ static const int DEEP_DEPTH = 8 * 1024;
static const int SUPER_DEEP_DEPTH = 80 * 1024; static const int SUPER_DEEP_DEPTH = 80 * 1024;
class Resource: public v8::String::ExternalStringResource,
public ZoneObject {
public:
explicit Resource(Vector<const uc16> string): data_(string.start()) {
length_ = string.length();
}
virtual const uint16_t* data() const { return data_; }
virtual size_t length() const { return length_; }
private:
const uc16* data_;
size_t length_;
};
static void InitializeBuildingBlocks( static void InitializeBuildingBlocks(
Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) { Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
// A list of pointers that we don't have any interest in cleaning up. // A list of pointers that we don't have any interest in cleaning up.
@ -83,19 +98,6 @@ static void InitializeBuildingBlocks(
break; break;
} }
case 2: { case 2: {
class Resource: public v8::String::ExternalStringResource,
public ZoneObject {
public:
explicit Resource(Vector<const uc16> string): data_(string.start()) {
length_ = string.length();
}
virtual const uint16_t* data() const { return data_; }
virtual size_t length() const { return length_; }
private:
const uc16* data_;
size_t length_;
};
uc16* buf = Zone::NewArray<uc16>(len); uc16* buf = Zone::NewArray<uc16>(len);
for (int j = 0; j < len; j++) { for (int j = 0; j < len; j++) {
buf[j] = gen() % 65536; buf[j] = gen() % 65536;

3
deps/v8/test/mjsunit/array-splice.js

@ -309,3 +309,6 @@ Array.prototype[1] = 1;
assertEquals(1, arr.pop()); assertEquals(1, arr.pop());
assertEquals(0, arr.pop()); assertEquals(0, arr.pop());
Array.prototype[1] = undefined; Array.prototype[1] = undefined;
// Test http://code.google.com/p/chromium/issues/detail?id=21860
Array.prototype.push.apply([], [1].splice(0, -(-1 % 5)));

25
deps/v8/test/mjsunit/mjsunit.status

@ -45,31 +45,6 @@ debug-scripts-request: PASS, SKIP if $mode == debug
# Flaky test that can hit compilation-time stack overflow in debug mode. # Flaky test that can hit compilation-time stack overflow in debug mode.
unicode-test: PASS, (PASS || FAIL) if $mode == debug unicode-test: PASS, (PASS || FAIL) if $mode == debug
# Bug number 1020483: Debug tests fail on ARM.
debug-constructor: CRASH, FAIL
debug-continue: SKIP
debug-evaluate-recursive: CRASH || FAIL
debug-changebreakpoint: CRASH || FAIL
debug-clearbreakpoint: CRASH || FAIL
debug-clearbreakpointgroup: PASS, FAIL if $mode == debug
debug-conditional-breakpoints: CRASH || FAIL
debug-evaluate: CRASH || FAIL
debug-ignore-breakpoints: CRASH || FAIL
debug-multiple-breakpoints: CRASH || FAIL
debug-setbreakpoint: CRASH || FAIL || PASS
debug-step-stub-callfunction: SKIP
debug-stepin-accessor: CRASH || FAIL
debug-stepin-builtin: CRASH || FAIL
debug-stepin-call-function-stub: CRASH || FAIL
debug-stepin-constructor: CRASH, FAIL
debug-stepin-function-call: CRASH || FAIL
debug-stepout-recursive-function: CRASH || FAIL
debug-stepout-to-builtin: CRASH || FAIL
debug-step: SKIP
debug-breakpoints: PASS || FAIL
debug-handle: CRASH || FAIL || PASS
regress/regress-269: SKIP
# Bug number 130 http://code.google.com/p/v8/issues/detail?id=130 # Bug number 130 http://code.google.com/p/v8/issues/detail?id=130
# Fails on real ARM hardware but not on the simulator. # Fails on real ARM hardware but not on the simulator.
string-compare-alignment: PASS || FAIL string-compare-alignment: PASS || FAIL

60
deps/v8/test/mjsunit/smi-negative-zero.js

@ -47,40 +47,40 @@ assertEquals(one / (minus_one * minus_one), 1, "one / 1");
assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III"); assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
assertEquals(one / (zero / one), Infinity, "one / 0 II"); assertEquals(one / (zero / one), Infinity, "one / 0 II");
assertEquals(one / (minus_four % two), -Infinity, "foo"); assertEquals(one / (minus_four % two), -Infinity, "foo1");
assertEquals(one / (minus_four % minus_two), -Infinity, "foo"); assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
assertEquals(one / (four % two), Infinity, "foo"); assertEquals(one / (four % two), Infinity, "foo3");
assertEquals(one / (four % minus_two), Infinity, "foo"); assertEquals(one / (four % minus_two), Infinity, "foo4");
// literal op variable // literal op variable
assertEquals(one / (0 * minus_one), -Infinity, "bar"); assertEquals(one / (0 * minus_one), -Infinity, "bar1");
assertEquals(one / (-1 * zero), -Infinity, "bar"); assertEquals(one / (-1 * zero), -Infinity, "bar2");
assertEquals(one / (0 * zero), Infinity, "bar"); assertEquals(one / (0 * zero), Infinity, "bar3");
assertEquals(one / (-1 * minus_one), 1, "bar"); assertEquals(one / (-1 * minus_one), 1, "bar4");
assertEquals(one / (0 / minus_one), -Infinity, "baz"); assertEquals(one / (0 / minus_one), -Infinity, "baz1");
assertEquals(one / (0 / one), Infinity, "baz"); assertEquals(one / (0 / one), Infinity, "baz2");
assertEquals(one / (-4 % two), -Infinity, "baz"); assertEquals(one / (-4 % two), -Infinity, "baz3");
assertEquals(one / (-4 % minus_two), -Infinity, "baz"); assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
assertEquals(one / (4 % two), Infinity, "baz"); assertEquals(one / (4 % two), Infinity, "baz5");
assertEquals(one / (4 % minus_two), Infinity, "baz"); assertEquals(one / (4 % minus_two), Infinity, "baz6");
// variable op literal // variable op literal
assertEquals(one / (zero * -1), -Infinity, "fizz"); assertEquals(one / (zero * -1), -Infinity, "fizz1");
assertEquals(one / (minus_one * 0), -Infinity, "fizz"); assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
assertEquals(one / (zero * 0), Infinity, "fizz"); assertEquals(one / (zero * 0), Infinity, "fizz3");
assertEquals(one / (minus_one * -1), 1, "fizz"); assertEquals(one / (minus_one * -1), 1, "fizz4");
assertEquals(one / (zero / -1), -Infinity, "buzz"); assertEquals(one / (zero / -1), -Infinity, "buzz1");
assertEquals(one / (zero / 1), Infinity, "buzz"); assertEquals(one / (zero / 1), Infinity, "buzz2");
assertEquals(one / (minus_four % 2), -Infinity, "buzz"); assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
assertEquals(one / (minus_four % -2), -Infinity, "buzz"); assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
assertEquals(one / (four % 2), Infinity, "buzz"); assertEquals(one / (four % 2), Infinity, "buzz5");
assertEquals(one / (four % -2), Infinity, "buzz"); assertEquals(one / (four % -2), Infinity, "buzz6");
// literal op literal // literal op literal
@ -91,10 +91,10 @@ assertEquals(one / (-1 * 0), -Infinity, "fisk3");
assertEquals(one / (0 * 0), Infinity, "fisk4"); assertEquals(one / (0 * 0), Infinity, "fisk4");
assertEquals(one / (-1 * -1), 1, "fisk5"); assertEquals(one / (-1 * -1), 1, "fisk5");
assertEquals(one / (0 / -1), -Infinity, "hest"); assertEquals(one / (0 / -1), -Infinity, "hest1");
assertEquals(one / (0 / 1), Infinity, "hest"); assertEquals(one / (0 / 1), Infinity, "hest2");
assertEquals(one / (-4 % 2), -Infinity, "fiskhest"); assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
assertEquals(one / (-4 % -2), -Infinity, "fiskhest"); assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
assertEquals(one / (4 % 2), Infinity, "fiskhest"); assertEquals(one / (4 % 2), Infinity, "fiskhest3");
assertEquals(one / (4 % -2), Infinity, "fiskhest"); assertEquals(one / (4 % -2), Infinity, "fiskhest4");

4
deps/v8/tools/gyp/v8.gyp

@ -34,12 +34,10 @@
'v8_use_snapshot%': 'true', 'v8_use_snapshot%': 'true',
'v8_regexp%': 'native', 'v8_regexp%': 'native',
}, },
'includes': [
'../../../build/common.gypi',
],
'target_defaults': { 'target_defaults': {
'defines': [ 'defines': [
'ENABLE_LOGGING_AND_PROFILING', 'ENABLE_LOGGING_AND_PROFILING',
'ENABLE_DEBUGGER_SUPPORT',
], ],
'conditions': [ 'conditions': [
['target_arch=="arm"', { ['target_arch=="arm"', {

11
deps/v8/tools/presubmit.py

@ -30,7 +30,7 @@
import optparse import optparse
import os import os
from os.path import abspath, join, dirname, basename from os.path import abspath, join, dirname, basename, exists
import re import re
import sys import sys
import subprocess import subprocess
@ -103,7 +103,7 @@ class SourceFileProcessor(object):
all_files = [] all_files = []
for file in self.GetPathsToSearch(): for file in self.GetPathsToSearch():
all_files += self.FindFilesIn(join(path, file)) all_files += self.FindFilesIn(join(path, file))
if not self.ProcessFiles(all_files): if not self.ProcessFiles(all_files, path):
return False return False
return True return True
@ -145,9 +145,12 @@ class CppLintProcessor(SourceFileProcessor):
def GetPathsToSearch(self): def GetPathsToSearch(self):
return ['src', 'public', 'samples', join('test', 'cctest')] return ['src', 'public', 'samples', join('test', 'cctest')]
def ProcessFiles(self, files): def ProcessFiles(self, files, path):
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES]) filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
command = ['cpplint.py', '--filter', filt] + join(files) command = ['cpplint.py', '--filter', filt] + join(files)
local_cpplint = join(path, "tools", "cpplint.py")
if exists(local_cpplint):
command = ['python', local_cpplint, '--filter', filt] + join(files)
process = subprocess.Popen(command) process = subprocess.Popen(command)
return process.wait() == 0 return process.wait() == 0
@ -194,7 +197,7 @@ class SourceProcessor(SourceFileProcessor):
result = False result = False
return result return result
def ProcessFiles(self, files): def ProcessFiles(self, files, path):
success = True success = True
for file in files: for file in files:
try: try:

6
deps/v8/tools/test.py

@ -1084,6 +1084,8 @@ def BuildOptions():
choices=PROGRESS_INDICATORS.keys(), default="mono") choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements", result.add_option("--no-build", help="Don't build requirements",
default=False, action="store_true") default=False, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run", result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true") default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite", result.add_option("-s", "--suite", help="A test suite",
@ -1261,6 +1263,10 @@ def Main():
if not BuildRequirements(context, reqs, options.mode, options.scons_flags): if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1 return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
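With the new flag a buildbot can split compiling from running, e.g. invoking tools/test.py --build-only first and re-running tools/test.py without it later (the invocation shown is illustrative).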
# Get status for tests # Get status for tests
sections = [ ] sections = [ ]
defs = { } defs = { }

2
deps/v8/tools/v8.xcodeproj/project.pbxproj

@ -1489,6 +1489,7 @@
V8_TARGET_ARCH_IA32, V8_TARGET_ARCH_IA32,
V8_NATIVE_REGEXP, V8_NATIVE_REGEXP,
ENABLE_LOGGING_AND_PROFILING, ENABLE_LOGGING_AND_PROFILING,
ENABLE_DEBUGGER_SUPPORT,
); );
HEADER_SEARCH_PATHS = ../src; HEADER_SEARCH_PATHS = ../src;
PRODUCT_NAME = v8; PRODUCT_NAME = v8;
@ -1537,6 +1538,7 @@
V8_TARGET_ARCH_ARM, V8_TARGET_ARCH_ARM,
ENABLE_DISASSEMBLER, ENABLE_DISASSEMBLER,
ENABLE_LOGGING_AND_PROFILING, ENABLE_LOGGING_AND_PROFILING,
ENABLE_DEBUGGER_SUPPORT,
); );
HEADER_SEARCH_PATHS = ../src; HEADER_SEARCH_PATHS = ../src;
PRODUCT_NAME = "v8-arm"; PRODUCT_NAME = "v8-arm";

2
deps/v8/tools/visual_studio/common.vsprops

@ -8,7 +8,7 @@
<Tool <Tool
Name="VCCLCompilerTool" Name="VCCLCompilerTool"
AdditionalIncludeDirectories="$(ProjectDir)\..\..\src;$(IntDir)\DerivedSources" AdditionalIncludeDirectories="$(ProjectDir)\..\..\src;$(IntDir)\DerivedSources"
PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_LOGGING_AND_PROFILING" PreprocessorDefinitions="WIN32;_CRT_SECURE_NO_DEPRECATE;_CRT_NONSTDC_NO_DEPRECATE;_HAS_EXCEPTIONS=0;ENABLE_LOGGING_AND_PROFILING;ENABLE_DEBUGGER_SUPPORT"
MinimalRebuild="false" MinimalRebuild="false"
ExceptionHandling="0" ExceptionHandling="0"
RuntimeTypeInfo="false" RuntimeTypeInfo="false"
