
Upgrade V8 to 3.0.9

v0.7.4-release
Ryan Dahl 14 years ago
commit b8f006ef72
  1. deps/v8/ChangeLog (10)
  2. deps/v8/LICENSE (2)
  3. deps/v8/SConstruct (20)
  4. deps/v8/src/SConscript (3)
  5. deps/v8/src/arm/assembler-arm.cc (8)
  6. deps/v8/src/arm/assembler-arm.h (6)
  7. deps/v8/src/arm/ic-arm.cc (8)
  8. deps/v8/src/arm/lithium-arm.cc (13)
  9. deps/v8/src/arm/lithium-arm.h (12)
  10. deps/v8/src/arm/lithium-codegen-arm.cc (33)
  11. deps/v8/src/arm/stub-cache-arm.cc (54)
  12. deps/v8/src/assembler.cc (10)
  13. deps/v8/src/assembler.h (28)
  14. deps/v8/src/builtins.cc (9)
  15. deps/v8/src/code-stubs.cc (2)
  16. deps/v8/src/codegen.cc (11)
  17. deps/v8/src/compiler.cc (12)
  18. deps/v8/src/compiler.h (10)
  19. deps/v8/src/flag-definitions.h (8)
  20. deps/v8/src/frames.cc (1)
  21. deps/v8/src/full-codegen.cc (11)
  22. deps/v8/src/gdb-jit.cc (1170)
  23. deps/v8/src/gdb-jit.h (136)
  24. deps/v8/src/hydrogen-instructions.cc (3)
  25. deps/v8/src/hydrogen-instructions.h (18)
  26. deps/v8/src/hydrogen.cc (13)
  27. deps/v8/src/hydrogen.h (3)
  28. deps/v8/src/ia32/ic-ia32.cc (12)
  29. deps/v8/src/ia32/lithium-codegen-ia32.cc (463)
  30. deps/v8/src/ia32/lithium-codegen-ia32.h (44)
  31. deps/v8/src/ia32/lithium-gap-resolver-ia32.cc (461)
  32. deps/v8/src/ia32/lithium-gap-resolver-ia32.h (110)
  33. deps/v8/src/ia32/lithium-ia32.cc (145)
  34. deps/v8/src/ia32/lithium-ia32.h (1064)
  35. deps/v8/src/ia32/stub-cache-ia32.cc (49)
  36. deps/v8/src/ic.cc (309)
  37. deps/v8/src/ic.h (13)
  38. deps/v8/src/inspector.cc (63)
  39. deps/v8/src/inspector.h (62)
  40. deps/v8/src/lithium-allocator.cc (12)
  41. deps/v8/src/lithium-allocator.h (48)
  42. deps/v8/src/lithium.cc (19)
  43. deps/v8/src/lithium.h (3)
  44. deps/v8/src/mark-compact.cc (12)
  45. deps/v8/src/objects-debug.cc (13)
  46. deps/v8/src/objects-inl.h (49)
  47. deps/v8/src/objects.h (162)
  48. deps/v8/src/platform-win32.cc (2)
  49. deps/v8/src/preparse-data.h (2)
  50. deps/v8/src/runtime-profiler.cc (1)
  51. deps/v8/src/runtime.cc (112)
  52. deps/v8/src/scopeinfo.cc (5)
  53. deps/v8/src/scopes.cc (62)
  54. deps/v8/src/stub-cache.cc (171)
  55. deps/v8/src/stub-cache.h (18)
  56. deps/v8/src/third_party/strongtalk/LICENSE (29)
  57. deps/v8/src/third_party/strongtalk/README.chromium (18)
  58. deps/v8/src/type-info.cc (15)
  59. deps/v8/src/type-info.h (6)
  60. deps/v8/src/version.cc (2)
  61. deps/v8/src/x64/assembler-x64.cc (6)
  62. deps/v8/src/x64/assembler-x64.h (2)
  63. deps/v8/src/x64/code-stubs-x64.h (1)
  64. deps/v8/src/x64/deoptimizer-x64.cc (6)
  65. deps/v8/src/x64/frames-x64.h (2)
  66. deps/v8/src/x64/ic-x64.cc (8)
  67. deps/v8/src/x64/lithium-codegen-x64.cc (144)
  68. deps/v8/src/x64/lithium-x64.cc (110)
  69. deps/v8/src/x64/lithium-x64.h (1075)
  70. deps/v8/src/x64/stub-cache-x64.cc (63)
  71. deps/v8/test/cctest/cctest.status (10)
  72. deps/v8/test/mjsunit/compiler/regress-serialized-slots.js (61)
  73. deps/v8/test/mjsunit/mjsunit.status (3)
  74. deps/v8/test/mozilla/mozilla.status (13)
  75. deps/v8/tools/gyp/v8.gyp (6)

deps/v8/ChangeLog (10)

@@ -1,7 +1,15 @@
2011-01-19: Version 3.0.9
Added basic GDB JIT Interface integration.
Make invalid break/continue statements a syntax error instead of a
runtime error.
2011-01-17: Version 3.0.8
Exposed heap size limit to the heap statistics gathered by
the GetHeapStatistics API.
Wrapped external pointers more carefully (issue 1037).

deps/v8/LICENSE (2)

@@ -24,7 +24,7 @@ are:
These libraries have their own licenses; we recommend you read them,
as their terms may differ from the terms below.
Copyright 2006-2009, Google Inc. All rights reserved.
Copyright 2006-2011, the V8 project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

deps/v8/SConstruct (20)

@@ -124,12 +124,15 @@ LIBRARY_FLAGS = {
},
'debuggersupport:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
},
'inspector:on': {
'CPPDEFINES': ['INSPECTOR'],
}
},
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
@@ -229,6 +232,9 @@ LIBRARY_FLAGS = {
},
'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
},
'gdbjit:on': {
'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
}
},
'msvc': {
@@ -705,6 +711,11 @@ SIMPLE_OPTIONS = {
'default': 'off',
'help': 'enable profiling of build target'
},
'gdbjit': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable GDB JIT interface'
},
'library': {
'values': ['static', 'shared'],
'default': 'static',
@@ -735,6 +746,11 @@ SIMPLE_OPTIONS = {
'default': 'on',
'help': 'enable debugging of JavaScript code'
},
'inspector': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable inspector features'
},
'soname': {
'values': ['on', 'off'],
'default': 'off',
@@ -871,6 +887,8 @@ def VerifyOptions(env):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')):
Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':

deps/v8/src/SConscript (3)

@@ -71,6 +71,7 @@ SOURCES = {
frames.cc
full-codegen.cc
func-name-inferrer.cc
gdb-jit.cc
global-handles.cc
fast-dtoa.cc
fixed-dtoa.cc
@@ -81,6 +82,7 @@ SOURCES = {
hydrogen.cc
hydrogen-instructions.cc
ic.cc
inspector.cc
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
@@ -190,6 +192,7 @@ SOURCES = {
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
ia32/lithium-gap-resolver-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc

deps/v8/src/arm/assembler-arm.cc (8)

@@ -2495,6 +2495,10 @@ void Assembler::GrowBuffer() {
void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
ASSERT(num_prinfo_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2502,6 +2506,10 @@ void Assembler::db(uint8_t data) {
void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
ASSERT(num_prinfo_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);

deps/v8/src/arm/assembler-arm.h (6)

@@ -1240,8 +1240,10 @@ class Assembler : public Malloced {
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables. The constant pool should be
// emitted before any use of db and dd to ensure that constant pools
// are not emitted as part of the tables generated.
void db(uint8_t data);
void dd(uint32_t data);
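The new comment is easiest to see with a sketch. The following hypothetical helper (not part of this commit) emits a data table under the stated rule, using only calls that appear elsewhere in this diff: CheckConstPool, as invoked in the lithium-codegen-arm.cc change below, followed by dd.

void EmitJumpTable(Assembler* assm, const uint32_t* entries, int count) {
  // Force the pending constant pool out first (no jump over it needed),
  // so none of its entries can land inside the table being emitted.
  assm->CheckConstPool(true, false);
  for (int i = 0; i < count; i++) {
    assm->dd(entries[i]);  // pure data words; no relocation info pending
  }
}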

deps/v8/src/arm/ic-arm.cc (8)

@@ -542,8 +542,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
Code::kNoExtraICState,
NORMAL,
argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// If the stub cache probing failed, the receiver might be a value.

deps/v8/src/arm/lithium-arm.cc (13)

@@ -576,6 +576,13 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value, new LUnallocated(LUnallocated::ANY));
}
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
if (value->EmitAtUses()) {
HInstruction* instr = HInstruction::cast(value);
@@ -907,11 +914,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
} else {
op = UseOrConstant(value);
if (op->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(op);
unalloc->set_policy(LUnallocated::ANY);
}
op = UseAny(value);
}
result->AddValue(op, value->representation());
}

deps/v8/src/arm/lithium-arm.h (12)

@@ -1887,15 +1887,25 @@ class LChunkBuilder BASE_EMBEDDED {
LOperand* UseRegister(HValue* value);
LOperand* UseRegisterAtStart(HValue* value);
// A value in a register that may be trashed.
// An input operand in a register that may be trashed.
LOperand* UseTempRegister(HValue* value);
// An input operand in a register or stack slot.
LOperand* Use(HValue* value);
LOperand* UseAtStart(HValue* value);
// An input operand in a register, stack slot or a constant operand.
LOperand* UseOrConstant(HValue* value);
LOperand* UseOrConstantAtStart(HValue* value);
// An input operand in a register or a constant operand.
LOperand* UseRegisterOrConstant(HValue* value);
LOperand* UseRegisterOrConstantAtStart(HValue* value);
// An input operand in register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
LOperand* UseAny(HValue* value);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
LInstruction* Define(LInstruction* instr, LUnallocated* result);

deps/v8/src/arm/lithium-codegen-arm.cc (33)

@@ -172,13 +172,13 @@ bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
AddResultMove(move.source(), move.destination());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
LGapNode* from = LookupNode(move.source());
LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
@@ -341,6 +341,11 @@ bool LCodeGen::GenerateDeferredCode() {
__ jmp(code->exit());
}
// Force constant pool emission at the end of deferred code to make
// sure that no constant pools are emitted after the official end of
// the instruction sequence.
masm()->CheckConstPool(true, false);
// Deferred code is the last part of the instruction sequence. Mark
// the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
@@ -816,8 +821,8 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
LOperand* to = move.to();
LOperand* from = move.source();
LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(dbl_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
@@ -999,7 +1004,6 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
Abort("ModI not implemented");
class DeferredModI: public LDeferredCode {
public:
DeferredModI(LCodeGen* codegen, LModI* instr)
@@ -1055,7 +1059,6 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
Abort("DivI not implemented");
class DeferredDivI: public LDeferredCode {
public:
DeferredDivI(LCodeGen* codegen, LDivI* instr)
@@ -1293,7 +1296,10 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
Abort("DoConstantD unimplemented.");
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
__ vmov(result, v);
}
@@ -2336,6 +2342,15 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
// Move the result back to general purpose register r0.
__ vmov(result, single_scratch);
// Test for -0.
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
__ vmov(scratch, input.high());
__ tst(scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
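The block added above deoptimizes when the floored result is 0 but the input's sign bit is set, because optimized code works on untagged int32 values that cannot represent -0. A standalone restatement of that sign-bit test (IsMinusZero is a hypothetical helper, not V8 code):

#include <cstdint>
#include <cstring>

// A double is negative zero exactly when its bit pattern is the IEEE-754
// sign bit alone, which is what the vmov/tst sequence above inspects.
bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // well-defined type pun
  return bits == (uint64_t{1} << 63);
}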

deps/v8/src/arm/stub-cache-arm.cc (54)

@@ -1332,11 +1332,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
kind_);
Object* obj;
{ MaybeObject* maybe_obj =
StubCache::ComputeCallMiss(arguments().immediate(), kind_);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
return obj;
}
@@ -1646,8 +1645,15 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
GenerateNameCheck(name, &miss);
Label* index_out_of_range_label = &index_out_of_range;
if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
index_out_of_range_label = &miss;
}
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1675,7 +1681,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ Drop(argc + 1);
@@ -1684,12 +1690,17 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kNanValueRootIndex);
__ Drop(argc + 1);
__ Ret();
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kNanValueRootIndex);
__ Drop(argc + 1);
__ Ret();
}
__ bind(&miss);
// Restore function name in r2.
__ Move(r2, Handle<String>(name));
__ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1720,9 +1731,15 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
GenerateNameCheck(name, &miss);
if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
index_out_of_range_label = &miss;
}
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@@ -1752,7 +1769,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ Drop(argc + 1);
@@ -1761,12 +1778,17 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kEmptyStringRootIndex);
__ Drop(argc + 1);
__ Ret();
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kEmptyStringRootIndex);
__ Drop(argc + 1);
__ Ret();
}
__ bind(&miss);
// Restore function name in r2.
__ Move(r2, Handle<String>(name));
__ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;

deps/v8/src/assembler.cc (10)

@@ -917,6 +917,11 @@ void PositionsRecorder::RecordPosition(int pos) {
ASSERT(pos != RelocInfo::kNoPosition);
ASSERT(pos >= 0);
state_.current_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
if (gdbjit_lineinfo_ != NULL) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
}
#endif
}
@@ -924,6 +929,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
ASSERT(pos != RelocInfo::kNoPosition);
ASSERT(pos >= 0);
state_.current_statement_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
if (gdbjit_lineinfo_ != NULL) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
}
#endif
}

deps/v8/src/assembler.h (28)

@@ -35,6 +35,7 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
#include "gdb-jit.h"
#include "runtime.h"
#include "top.h"
#include "token.h"
@@ -637,7 +638,29 @@ struct PositionState {
class PositionsRecorder BASE_EMBEDDED {
public:
explicit PositionsRecorder(Assembler* assembler)
: assembler_(assembler) {}
: assembler_(assembler) {
#ifdef ENABLE_GDB_JIT_INTERFACE
gdbjit_lineinfo_ = NULL;
#endif
}
#ifdef ENABLE_GDB_JIT_INTERFACE
~PositionsRecorder() {
delete gdbjit_lineinfo_;
}
void StartGDBJITLineInfoRecording() {
if (FLAG_gdbjit) {
gdbjit_lineinfo_ = new GDBJITLineInfo();
}
}
GDBJITLineInfo* DetachGDBJITLineInfo() {
GDBJITLineInfo* lineinfo = gdbjit_lineinfo_;
gdbjit_lineinfo_ = NULL; // To prevent deallocation in destructor.
return lineinfo;
}
#endif
// Set current position to pos.
void RecordPosition(int pos);
@@ -657,6 +680,9 @@ class PositionsRecorder BASE_EMBEDDED {
private:
Assembler* assembler_;
PositionState state_;
#ifdef ENABLE_GDB_JIT_INTERFACE
GDBJITLineInfo* gdbjit_lineinfo_;
#endif
friend class PreservePositionScope;
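Together with the codegen.cc and full-codegen.cc changes further down, the two new hooks give every code generator the same lifecycle; the fragment below is condensed from those diffs for orientation, not new API:

MacroAssembler masm(NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
// ... generate code; RecordPosition()/RecordStatementPosition() now also
// append (pc offset, source position, is_statement) entries ...
#ifdef ENABLE_GDB_JIT_INTERFACE
if (!code.is_null()) {
  GDBJITLineInfo* lineinfo =
      masm.positions_recorder()->DetachGDBJITLineInfo();
  GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif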

deps/v8/src/builtins.cc (9)

@@ -31,6 +31,7 @@
#include "arguments.h"
#include "bootstrapper.h"
#include "builtins.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "vm-state-inl.h"
@@ -1550,7 +1551,7 @@ void Builtins::Setup(bool create_heap_objects) {
CodeDesc desc;
masm.GetCode(&desc);
Code::Flags flags = functions[i].flags;
Object* code = 0;
Object* code = NULL;
{
// During startup it's OK to always allocate and defer GC to later.
// This simplifies things because we don't need to retry.
@@ -1564,7 +1565,11 @@ void Builtins::Setup(bool create_heap_objects) {
}
// Log the event and add the code to the builtins array.
PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), functions[i].s_name));
Code::cast(code),
functions[i].s_name));
GDBJIT(AddCode(GDBJITInterface::BUILTIN,
functions[i].s_name,
Code::cast(code)));
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {

deps/v8/src/code-stubs.cc (2)

@@ -30,6 +30,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "factory.h"
#include "gdb-jit.h"
#include "macro-assembler.h"
#include "oprofile-agent.h"
@@ -66,6 +67,7 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->instruction_start(),
code->instruction_size()));
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER

deps/v8/src/codegen.cc (11)

@@ -248,6 +248,9 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
// Generate code.
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
cgen.Generate(info);
@@ -263,6 +266,14 @@ bool CodeGenerator::MakeCode(CompilationInfo* info) {
code->SetNoStackCheckTable();
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
if (!code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
return !code.is_null();
}

deps/v8/src/compiler.cc (12)

@@ -35,6 +35,7 @@
#include "data-flow.h"
#include "debug.h"
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "liveedit.h"
@@ -207,7 +208,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// Limit the number of times we re-compile a functions with
// the optimizing compiler.
const int kMaxOptCount = FLAG_deopt_every_n_times == 0 ? 10 : 1000;
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
AbortAndDisable(info);
// True indicates the compilation pipeline is still going, not
@@ -420,6 +422,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
info->code()->instruction_start(),
info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code()));
} else {
PROFILE(CodeCreateEvent(
info->is_eval()
@@ -430,6 +435,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
OPROFILE(CreateNativeCodeRegion(info->is_eval() ? "Eval" : "Script",
info->code()->instruction_start(),
info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
// Allocate function.
@@ -793,6 +799,10 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
code->instruction_size()));
}
}
GDBJIT(AddCode(name,
Handle<Script>(info->script()),
Handle<Code>(info->code())));
}
} } // namespace v8::internal

deps/v8/src/compiler.h (10)

@@ -209,9 +209,13 @@ class CompilationInfo BASE_EMBEDDED {
class Compiler : public AllStatic {
public:
// All routines return a JSFunction.
// If an error occurs an exception is raised and
// the return handle contains NULL.
// Default maximum number of function optimization attempts before we
// give up.
static const int kDefaultMaxOptCount = 10;
// All routines return a SharedFunctionInfo.
// If an error occurs an exception is raised and the return handle
// contains NULL.
// Compile a String source within a context.
static Handle<SharedFunctionInfo> Compile(Handle<String> source,

deps/v8/src/flag-definitions.h (8)

@@ -366,6 +366,14 @@ DEFINE_bool(debug_script_collected_events, true,
"Enable debugger script collected events")
#endif
//
// GDB JIT integration flags.
//
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
//
// Debug only flags
//

deps/v8/src/frames.cc (1)

@@ -554,6 +554,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
parameters_base += safepoint_entry.argument_count();
}
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
parameters_base += DoubleRegister::kNumAllocatableRegisters *
kDoubleSize / kPointerSize;

deps/v8/src/full-codegen.cc (11)

@@ -286,6 +286,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
FullCodeGenerator cgen(&masm);
cgen.Generate(info);
@@ -304,6 +307,14 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
code->set_stack_check_table_start(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
if (!code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
return !code.is_null();
}

deps/v8/src/gdb-jit.cc (1170)

File diff suppressed because it is too large

deps/v8/src/gdb-jit.h (136)

@@ -0,0 +1,136 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_GDB_JIT_H_
#define V8_GDB_JIT_H_
//
// Basic implementation of GDB JIT Interface client.
// GDB JIT Interface is supported in GDB 7.0 and above.
// Currently only the ia32 and x64 architectures on Linux are supported.
//
#ifdef ENABLE_GDB_JIT_INTERFACE
#include "v8.h"
#include "factory.h"
namespace v8 {
namespace internal {
#define CODE_TAGS_LIST(V) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
V(STORE_IC) \
V(KEYED_STORE_IC) \
V(CALL_IC) \
V(CALL_INITIALIZE) \
V(CALL_PRE_MONOMORPHIC) \
V(CALL_NORMAL) \
V(CALL_MEGAMORPHIC) \
V(CALL_MISS) \
V(STUB) \
V(BUILTIN) \
V(SCRIPT) \
V(EVAL)
class GDBJITLineInfo : public Malloced {
public:
GDBJITLineInfo()
: pc_info_(10) { }
void SetPosition(intptr_t pc, int pos, bool is_statement) {
AddPCInfo(PCInfo(pc, pos, is_statement));
}
struct PCInfo {
PCInfo(intptr_t pc, int pos, bool is_statement)
: pc_(pc), pos_(pos), is_statement_(is_statement) { }
intptr_t pc_;
int pos_;
bool is_statement_;
};
List<PCInfo>* pc_info() {
return &pc_info_;
}
private:
void AddPCInfo(const PCInfo& pc_info) {
pc_info_.Add(pc_info);
}
List<PCInfo> pc_info_;
};
class GDBJITInterface: public AllStatic {
public:
enum CodeTag {
#define V(x) x,
CODE_TAGS_LIST(V)
#undef V
TAG_COUNT
};
static const char* Tag2String(CodeTag tag) {
switch (tag) {
#define V(x) case x: return #x;
CODE_TAGS_LIST(V)
#undef V
default:
return NULL;
}
}
static void AddCode(const char* name,
Code* code,
Script* script = NULL);
static void AddCode(Handle<String> name,
Handle<Script> script,
Handle<Code> code);
static void AddCode(CodeTag tag, String* name, Code* code);
static void AddCode(CodeTag tag, const char* name, Code* code);
static void AddCode(CodeTag tag, Code* code);
static void RemoveCode(Code* code);
static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
};
#define GDBJIT(action) GDBJITInterface::action
} } // namespace v8::internal
#else
#define GDBJIT(action) ((void) 0)
#endif
#endif
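The macro pair at the end of the header is what keeps call sites free of #ifdefs: with the interface enabled, GDBJIT(action) forwards to GDBJITInterface; otherwise it compiles to nothing. A self-contained toy demonstration of the same pattern (the struct body and strings here are invented for illustration):

#include <cstdio>

#define ENABLE_GDB_JIT_INTERFACE  // remove to get the no-op expansion

struct GDBJITInterface {
  static void AddCode(const char* name) { std::printf("added %s\n", name); }
};

#ifdef ENABLE_GDB_JIT_INTERFACE
#define GDBJIT(action) GDBJITInterface::action
#else
#define GDBJIT(action) ((void) 0)
#endif

int main() {
  GDBJIT(AddCode("builtin-stub"));  // expands to a call, or to ((void) 0)
  return 0;
}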

deps/v8/src/hydrogen-instructions.cc (3)

@@ -996,7 +996,8 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
has_int32_value_ = static_cast<double>(static_cast<int32_t>(n)) == n;
double roundtrip_value = static_cast<double>(static_cast<int32_t>(n));
has_int32_value_ = BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(n);
if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
double_value_ = n;
has_double_value_ = true;
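The switch from == to a bit comparison matters for one value in particular: -0.0 round-trips through int32 to +0.0, and the two compare equal under ==, so the old test wrongly classified -0.0 as an int32 constant. A standalone restatement (hypothetical helper name; assumes n is finite and in int32 range so the cast is well-defined):

#include <cstdint>
#include <cstring>

bool Int32RoundTripPreservesBits(double n) {
  double roundtrip = static_cast<double>(static_cast<int32_t>(n));
  uint64_t a, b;
  std::memcpy(&a, &roundtrip, sizeof a);  // the role BitCast plays above
  std::memcpy(&b, &n, sizeof b);
  return a == b;  // bit-exact: +0.0 passes, -0.0 fails (sign bit differs)
}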

deps/v8/src/hydrogen-instructions.h (18)

@@ -773,6 +773,10 @@ class HInstruction: public HValue {
virtual void Verify() const;
#endif
// Returns whether this is some kind of deoptimizing check
// instruction.
virtual bool IsCheckInstruction() const { return false; }
DECLARE_INSTRUCTION(Instruction)
protected:
@@ -1504,6 +1508,8 @@ class HCheckMap: public HUnaryOperation {
SetFlag(kDependsOnMaps);
}
virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1537,6 +1543,8 @@ class HCheckFunction: public HUnaryOperation {
SetFlag(kUseGVN);
}
virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1573,6 +1581,8 @@ class HCheckInstanceType: public HUnaryOperation {
SetFlag(kUseGVN);
}
virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1610,6 +1620,8 @@ class HCheckNonSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1632,6 +1644,8 @@ class HCheckPrototypeMaps: public HInstruction {
SetFlag(kDependsOnMaps);
}
virtual bool IsCheckInstruction() const { return true; }
#ifdef DEBUG
virtual void Verify() const;
#endif
@@ -1668,6 +1682,8 @@ class HCheckSmi: public HUnaryOperation {
SetFlag(kUseGVN);
}
virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@@ -1996,6 +2012,8 @@ class HBoundsCheck: public HBinaryOperation {
SetFlag(kUseGVN);
}
virtual bool IsCheckInstruction() const { return true; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Integer32();
}

deps/v8/src/hydrogen.cc (13)

@@ -687,6 +687,11 @@ HGraph::HGraph(CompilationInfo* info)
}
bool HGraph::AllowAggressiveOptimizations() const {
return info()->shared_info()->opt_count() + 1 < Compiler::kDefaultMaxOptCount;
}
Handle<Code> HGraph::Compile() {
int values = GetMaximumValueID();
if (values > LAllocator::max_initial_value_ids()) {
@@ -1453,8 +1458,12 @@ void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
// about code that was never executed.
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
if (!instr->IsChange() &&
FLAG_aggressive_loop_invariant_motion) return true;
if (FLAG_aggressive_loop_invariant_motion &&
!instr->IsChange() &&
(!instr->IsCheckInstruction() ||
graph_->AllowAggressiveOptimizations())) {
return true;
}
HBasicBlock* block = instr->block();
bool result = true;
if (block != loop_header) {
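Restated, the new guard only hoists deoptimizing checks out of loops while the function still has recompilation attempts to spare, so a hoisted check that keeps deoptimizing cannot exhaust the retry budget introduced in compiler.h. A hypothetical paraphrase of the combined predicate:

// Mirrors ShouldMove plus HGraph::AllowAggressiveOptimizations above.
bool ShouldHoistOutOfLoop(bool aggressive_licm_flag, bool is_change,
                          bool is_check, int opt_count, int max_opt_count) {
  bool allow_aggressive = opt_count + 1 < max_opt_count;
  return aggressive_licm_flag && !is_change &&
         (!is_check || allow_aggressive);
}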

deps/v8/src/hydrogen.h (3)

@@ -296,6 +296,9 @@ class HGraph: public HSubgraph {
explicit HGraph(CompilationInfo* info);
CompilationInfo* info() const { return info_; }
bool AllowAggressiveOptimizations() const;
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
Handle<String> debug_name() const { return info_->function()->debug_name(); }

deps/v8/src/ia32/ic-ia32.cc (12)

@@ -1231,8 +1231,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
Code::kNoExtraICState,
NORMAL,
argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
@@ -1325,7 +1329,9 @@ static void GenerateCallNormal(MacroAssembler* masm, int argc) {
}
static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
static void GenerateCallMiss(MacroAssembler* masm,
int argc,
IC::UtilityId id) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address

deps/v8/src/ia32/lithium-codegen-ia32.cc (463)

@@ -58,157 +58,6 @@ class SafepointGenerator : public PostCallGenerator {
};
class LGapNode: public ZoneObject {
public:
explicit LGapNode(LOperand* operand)
: operand_(operand), resolved_(false), visited_id_(-1) { }
LOperand* operand() const { return operand_; }
bool IsResolved() const { return !IsAssigned() || resolved_; }
void MarkResolved() {
ASSERT(!IsResolved());
resolved_ = true;
}
int visited_id() const { return visited_id_; }
void set_visited_id(int id) {
ASSERT(id > visited_id_);
visited_id_ = id;
}
bool IsAssigned() const { return assigned_from_.is_set(); }
LGapNode* assigned_from() const { return assigned_from_.get(); }
void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
private:
LOperand* operand_;
SetOncePointer<LGapNode> assigned_from_;
bool resolved_;
int visited_id_;
};
LGapResolver::LGapResolver()
: nodes_(32),
identified_cycles_(4),
result_(16),
next_visited_id_(0) {
}
const ZoneList<LMoveOperands>* LGapResolver::Resolve(
const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand) {
nodes_.Rewind(0);
identified_cycles_.Rewind(0);
result_.Rewind(0);
next_visited_id_ = 0;
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) RegisterMove(move);
}
for (int i = 0; i < identified_cycles_.length(); ++i) {
ResolveCycle(identified_cycles_[i], marker_operand);
}
int unresolved_nodes;
do {
unresolved_nodes = 0;
for (int j = 0; j < nodes_.length(); j++) {
LGapNode* node = nodes_[j];
if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
AddResultMove(node->assigned_from(), node);
node->MarkResolved();
}
if (!node->IsResolved()) ++unresolved_nodes;
}
} while (unresolved_nodes > 0);
return &result_;
}
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
AddResultMove(from->operand(), to->operand());
}
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
result_.Add(LMoveOperands(from, to));
}
void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
ZoneList<LOperand*> cycle_operands(8);
cycle_operands.Add(marker_operand);
LGapNode* cur = start;
do {
cur->MarkResolved();
cycle_operands.Add(cur->operand());
cur = cur->assigned_from();
} while (cur != start);
cycle_operands.Add(marker_operand);
for (int i = cycle_operands.length() - 1; i > 0; --i) {
LOperand* from = cycle_operands[i];
LOperand* to = cycle_operands[i - 1];
AddResultMove(from, to);
}
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
ASSERT(a != b);
LGapNode* cur = a;
while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
cur->set_visited_id(visited_id);
cur = cur->assigned_from();
}
return cur == b;
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
ASSERT(a != b);
return CanReach(a, b, next_visited_id_++);
}
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
}
ASSERT(!to->IsAssigned());
if (CanReach(from, to)) {
// This introduces a cycle. Save.
identified_cycles_.Add(from);
}
to->set_assigned_from(from);
}
}
LGapNode* LGapResolver::LookupNode(LOperand* operand) {
for (int i = 0; i < nodes_.length(); ++i) {
if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
}
// No node found => create a new one.
LGapNode* result = new LGapNode(operand);
nodes_.Add(result);
return result;
}
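The block above removes the old graph-based resolver from this file; its replacement lives in the new lithium-gap-resolver-ia32 files listed earlier. Both solve the parallel-move problem: every destination must receive the old value of its source, and cycles such as a two-register swap need a scratch location. A minimal standalone sketch of the idea (hypothetical code, assuming src is a permutation of slot indices):

#include <vector>

// After the call, slots[i] holds the old value of slots[src[i]]. Each
// cycle is broken with one scratch value, the role the marker operand
// played in the removed resolver.
void PerformParallelMove(std::vector<int>* slots,
                         const std::vector<size_t>& src) {
  std::vector<bool> done(slots->size(), false);
  for (size_t start = 0; start < slots->size(); ++start) {
    if (done[start] || src[start] == start) continue;
    int scratch = (*slots)[start];  // needed again when the cycle closes
    size_t i = start;
    for (;;) {
      done[i] = true;
      size_t s = src[i];
      if (s == start) {             // cycle closed: deposit the scratch
        (*slots)[i] = scratch;
        break;
      }
      (*slots)[i] = (*slots)[s];    // slot s has not been overwritten yet
      i = s;
    }
  }
}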
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -427,6 +276,14 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
int index = op->index();
int offset = (index >= 0) ? index + 3 : index - 1;
return Operand(ebp, -offset * kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
@@ -762,66 +619,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
// xmm0 must always be a scratch register.
XMMRegister xmm_scratch = xmm0;
LUnallocated marker_operand(LUnallocated::NONE);
Register cpu_scratch = esi;
bool destroys_cpu_scratch = false;
const ZoneList<LMoveOperands>* moves =
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
LOperand* to = move.to();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(xmm_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
if (from->IsConstantOperand()) {
__ mov(ToOperand(to), ToImmediate(from));
} else if (from == &marker_operand) {
if (to->IsRegister() || to->IsStackSlot()) {
__ mov(ToOperand(to), cpu_scratch);
ASSERT(destroys_cpu_scratch);
} else {
ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
__ movdbl(ToOperand(to), xmm_scratch);
}
} else if (to == &marker_operand) {
if (from->IsRegister() || from->IsStackSlot()) {
__ mov(cpu_scratch, ToOperand(from));
destroys_cpu_scratch = true;
} else {
ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
__ movdbl(xmm_scratch, ToOperand(from));
}
} else if (from->IsRegister()) {
__ mov(ToOperand(to), ToRegister(from));
} else if (to->IsRegister()) {
__ mov(ToRegister(to), ToOperand(from));
} else if (from->IsStackSlot()) {
ASSERT(to->IsStackSlot());
__ push(eax);
__ mov(eax, ToOperand(from));
__ mov(ToOperand(to), eax);
__ pop(eax);
} else if (from->IsDoubleRegister()) {
__ movdbl(ToOperand(to), ToDoubleRegister(from));
} else if (to->IsDoubleRegister()) {
__ movdbl(ToDoubleRegister(to), ToOperand(from));
} else {
ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
__ movdbl(xmm_scratch, ToOperand(from));
__ movdbl(ToOperand(to), xmm_scratch);
}
}
if (destroys_cpu_scratch) {
__ mov(cpu_scratch, Operand(ebp, -kPointerSize));
}
resolver_.Resolve(move);
}
@@ -908,11 +706,11 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
void LCodeGen::DoModI(LModI* instr) {
LOperand* right = instr->right();
LOperand* right = instr->InputAt(1);
ASSERT(ToRegister(instr->result()).is(edx));
ASSERT(ToRegister(instr->left()).is(eax));
ASSERT(!ToRegister(instr->right()).is(eax));
ASSERT(!ToRegister(instr->right()).is(edx));
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
Register right_reg = ToRegister(right);
@@ -948,11 +746,11 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
LOperand* right = instr->right();
LOperand* right = instr->InputAt(1);
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->left()).is(eax));
ASSERT(!ToRegister(instr->right()).is(eax));
ASSERT(!ToRegister(instr->right()).is(edx));
ASSERT(ToRegister(instr->InputAt(0)).is(eax));
ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
Register left_reg = eax;
@@ -994,11 +792,11 @@ void LCodeGen::DoDivI(LDivI* instr) {
void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->left());
LOperand* right = instr->right();
Register left = ToRegister(instr->InputAt(0));
LOperand* right = instr->InputAt(1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ mov(ToRegister(instr->temp()), left);
__ mov(ToRegister(instr->TempAt(0)), left);
}
if (right->IsConstantOperand()) {
@@ -1022,7 +820,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else {
// Test the non-zero operand for negative sign.
__ or_(ToRegister(instr->temp()), ToOperand(right));
__ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
DeoptimizeIf(sign, instr->environment());
}
__ bind(&done);
@@ -1031,8 +829,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
void LCodeGen::DoBitI(LBitI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
@@ -1072,8 +870,8 @@ void LCodeGen::DoBitI(LBitI* instr) {
void LCodeGen::DoShiftI(LShiftI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
if (right->IsRegister()) {
@@ -1128,8 +926,8 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1184,22 +982,22 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->input());
Register array = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->input());
Register array = ToRegister(instr->InputAt(0));
__ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->temporary());
Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
NearLabel done;
// If the object is a smi return the object.
@@ -1216,14 +1014,14 @@ void LCodeGen::DoValueOf(LValueOf* instr) {
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
__ not_(ToRegister(input));
}
void LCodeGen::DoThrow(LThrow* instr) {
__ push(ToOperand(instr->input()));
__ push(ToOperand(instr->InputAt(0)));
CallRuntime(Runtime::kThrow, 1, instr);
if (FLAG_debug_code) {
@@ -1234,8 +1032,8 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
@@ -1251,8 +1049,8 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
// Modulo uses a fixed result register.
ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
switch (instr->op()) {
@@ -1291,8 +1089,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->left()).is(edx));
ASSERT(ToRegister(instr->right()).is(eax));
ASSERT(ToRegister(instr->InputAt(0)).is(edx));
ASSERT(ToRegister(instr->InputAt(1)).is(eax));
ASSERT(ToRegister(instr->result()).is(eax));
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
@@ -1333,17 +1131,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
__ test(reg, Operand(reg));
EmitBranch(true_block, false_block, not_zero);
} else if (r.IsDouble()) {
XMMRegister reg = ToDoubleRegister(instr->input());
XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
__ xorpd(xmm0, xmm0);
__ ucomisd(reg, xmm0);
EmitBranch(true_block, false_block, not_equal);
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
__ cmp(reg, Factory::true_value());
EmitBranch(true_block, false_block, equal);
@@ -1471,8 +1269,8 @@ void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
void LCodeGen::DoCmpID(LCmpID* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
LOperand* result = instr->result();
NearLabel unordered;
@@ -1497,8 +1295,8 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1517,8 +1315,8 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
@@ -1531,8 +1329,8 @@ void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
Register right = ToRegister(instr->right());
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1542,7 +1340,7 @@ void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
void LCodeGen::DoIsNull(LIsNull* instr) {
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
// TODO(fsc): If the expression is known to be a smi, then it's
@@ -1580,7 +1378,7 @@ void LCodeGen::DoIsNull(LIsNull* instr) {
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
@@ -1601,7 +1399,7 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
__ j(zero, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
Register scratch = ToRegister(instr->temp());
Register scratch = ToRegister(instr->TempAt(0));
__ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
@@ -1640,9 +1438,9 @@ Condition LCodeGen::EmitIsObject(Register input,
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
Register temp = ToRegister(instr->TempAt(0));
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
@@ -1660,9 +1458,9 @@ void LCodeGen::DoIsObject(LIsObject* instr) {
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->input());
Register temp = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
Register reg = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1676,7 +1474,7 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
void LCodeGen::DoIsSmi(LIsSmi* instr) {
Operand input = ToOperand(instr->input());
Operand input = ToOperand(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
@@ -1690,7 +1488,7 @@ void LCodeGen::DoIsSmi(LIsSmi* instr) {
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Operand input = ToOperand(instr->input());
Operand input = ToOperand(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1700,9 +1498,9 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
InstanceType LHasInstanceType::TestType() {
InstanceType from = hydrogen()->from();
InstanceType to = hydrogen()->to();
static InstanceType TestType(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
ASSERT(from == to || to == LAST_TYPE);
return from;
@@ -1710,9 +1508,9 @@ InstanceType LHasInstanceType::TestType() {
Condition LHasInstanceType::BranchCondition() {
InstanceType from = hydrogen()->from();
InstanceType to = hydrogen()->to();
static Condition BranchCondition(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
if (to == LAST_TYPE) return above_equal;
if (from == FIRST_TYPE) return below_equal;
@@ -1722,15 +1520,15 @@ Condition LHasInstanceType::BranchCondition() {
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ test(input, Immediate(kSmiTagMask));
NearLabel done, is_false;
__ j(zero, &is_false);
__ CmpObjectType(input, instr->TestType(), result);
__ j(NegateCondition(instr->BranchCondition()), &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
__ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
__ mov(result, Handle<Object>(Heap::true_value()));
__ jmp(&done);
__ bind(&is_false);
@@ -1740,8 +1538,8 @@ void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->input());
Register temp = ToRegister(instr->temp());
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1751,13 +1549,13 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
__ test(input, Immediate(kSmiTagMask));
__ j(zero, false_label);
__ CmpObjectType(input, instr->TestType(), temp);
EmitBranch(true_block, false_block, instr->BranchCondition());
__ CmpObjectType(input, TestType(instr->hydrogen()), temp);
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
@@ -1773,7 +1571,7 @@ void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1842,10 +1640,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Register temp = ToRegister(instr->temporary());
Register temp = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
NearLabel done;
Label is_true, is_false;
@@ -1865,9 +1663,9 @@ void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->input());
Register temp = ToRegister(instr->temporary());
Register temp2 = ToRegister(instr->temporary2());
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
if (input.is(temp)) {
// Swap.
Register swapper = temp;
@@ -1889,7 +1687,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
@@ -1946,8 +1744,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->input());
Register temp = ToRegister(instr->temp());
Register object = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
// A Smi is not instance of anything.
__ test(object, Immediate(kSmiTagMask));
@@ -1957,7 +1755,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
NearLabel cache_miss;
Register map = ToRegister(instr->temp());
Register map = ToRegister(instr->TempAt(0));
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
__ cmp(map, Factory::the_hole_value()); // Patched to cached map.
@@ -2005,7 +1803,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// Get the temp register reserved by the instruction. This needs to be edi as
// its slot of the pushing of safepoint registers is used to communicate the
// offset to the location of the map check.
Register temp = ToRegister(instr->temp());
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(edi));
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 13;
@@ -2110,7 +1908,7 @@ void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
Register value = ToRegister(instr->input());
Register value = ToRegister(instr->InputAt(0));
__ mov(Operand::Cell(instr->hydrogen()->cell()), value);
}
@@ -2124,7 +1922,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->input());
Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
@@ -2147,7 +1945,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register temp = ToRegister(instr->temporary());
Register temp = ToRegister(instr->TempAt(0));
Register result = ToRegister(instr->result());
// Check that the function really is a function.
@@ -2188,8 +1986,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
ASSERT(instr->result()->Equals(instr->input()));
Register reg = ToRegister(instr->input());
ASSERT(instr->result()->Equals(instr->InputAt(0)));
Register reg = ToRegister(instr->InputAt(0));
__ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
if (FLAG_debug_code) {
NearLabel done;
@@ -2269,7 +2067,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Operand elem = ToOperand(instr->input());
Operand elem = ToOperand(instr->InputAt(0));
Register result = ToRegister(instr->result());
NearLabel done;
@@ -2343,7 +2141,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->input();
LOperand* argument = instr->InputAt(0);
if (argument->IsConstantOperand()) {
__ push(ToImmediate(argument));
} else {
@ -2409,7 +2207,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
Register input_reg = ToRegister(instr->input());
Register input_reg = ToRegister(instr->InputAt(0));
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
Factory::heap_number_map());
DeoptimizeIf(not_equal, instr->environment());
@ -2476,17 +2274,17 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
LUnaryMathOperation* instr_;
};
ASSERT(instr->input()->Equals(instr->result()));
ASSERT(instr->InputAt(0)->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->input());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
__ pxor(scratch, scratch);
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
} else if (r.IsInteger32()) {
Register input_reg = ToRegister(instr->input());
Register input_reg = ToRegister(instr->InputAt(0));
__ test(input_reg, Operand(input_reg));
Label is_positive;
__ j(not_sign, &is_positive);
@ -2498,7 +2296,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
Label not_smi;
Register input_reg = ToRegister(instr->input());
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ test(input_reg, Immediate(kSmiTagMask));
__ j(not_zero, deferred->entry());
@ -2519,7 +2317,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->input());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
__ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
@ -2541,7 +2339,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->input());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
// xmm_scratch = 0.5
ExternalReference one_half = ExternalReference::address_of_one_half();
@ -2574,7 +2372,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->input());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
@ -2582,7 +2380,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->input());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
ExternalReference negative_infinity =
ExternalReference::address_of_negative_infinity();
@ -2594,8 +2392,8 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
@ -2708,6 +2506,7 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->InputAt(0)).is(ecx));
int arity = instr->arity();
Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
@ -2757,7 +2556,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->input()).is(edi));
ASSERT(ToRegister(instr->InputAt(0)).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
@ -2784,12 +2583,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (instr->is_in_object()) {
__ mov(FieldOperand(object, offset), value);
if (instr->needs_write_barrier()) {
Register temp = ToRegister(instr->temp());
Register temp = ToRegister(instr->TempAt(0));
// Update the write barrier for the object for in-object properties.
__ RecordWrite(object, offset, value, temp);
}
} else {
Register temp = ToRegister(instr->temp());
Register temp = ToRegister(instr->TempAt(0));
__ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
__ mov(FieldOperand(temp, offset), value);
if (instr->needs_write_barrier()) {
@ -2853,7 +2652,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
@ -2871,7 +2670,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LNumberTagI* instr_;
};
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
@ -2884,7 +2683,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
Register reg = ToRegister(instr->input());
Register reg = ToRegister(instr->InputAt(0));
Register tmp = reg.is(eax) ? ecx : eax;
// Preserve the value of all registers.
@ -2934,9 +2733,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
XMMRegister input_reg = ToDoubleRegister(instr->input());
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
Register tmp = ToRegister(instr->TempAt(0));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
@ -2966,7 +2765,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ SmiTag(ToRegister(input));
@ -2974,7 +2773,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ test(ToRegister(input), Immediate(kSmiTagMask));
@ -3034,7 +2833,7 @@ class DeferredTaggedToI: public LDeferredCode {
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
NearLabel done, heap_number;
Register input_reg = ToRegister(instr->input());
Register input_reg = ToRegister(instr->InputAt(0));
// Heap number map check.
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@ -3077,7 +2876,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
NearLabel deopt;
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cmp(input_reg, 0x80000000u);
@ -3094,7 +2893,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(not_equal, instr->environment());
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
@ -3114,7 +2913,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
@ -3134,7 +2933,7 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
@ -3147,7 +2946,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
@ -3185,7 +2984,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ bind(&done);
} else {
NearLabel done;
Register temp_reg = ToRegister(instr->temporary());
Register temp_reg = ToRegister(instr->TempAt(0));
XMMRegister xmm_scratch = xmm0;
// If cvttsd2si succeeded, we're done. Otherwise, we attempt
@ -3264,7 +3063,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
__ test(ToRegister(input), Immediate(kSmiTagMask));
DeoptimizeIf(instr->condition(), instr->environment());
@ -3272,8 +3071,8 @@ void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->input());
Register temp = ToRegister(instr->temp());
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
InstanceType first = instr->hydrogen()->first();
InstanceType last = instr->hydrogen()->last();
@ -3297,15 +3096,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
ASSERT(instr->input()->IsRegister());
Register reg = ToRegister(instr->input());
ASSERT(instr->InputAt(0)->IsRegister());
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, instr->hydrogen()->target());
DeoptimizeIf(not_equal, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@ -3326,7 +3125,7 @@ void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->temp());
Register reg = ToRegister(instr->TempAt(0));
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
@ -3470,7 +3269,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->input();
LOperand* input = instr->InputAt(0);
if (input->IsConstantOperand()) {
__ push(ToImmediate(input));
} else {
@ -3481,7 +3280,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
@ -3504,7 +3303,7 @@ void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->input());
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);

44
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -34,6 +34,7 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
#include "ia32/lithium-gap-resolver-ia32.h"
namespace v8 {
namespace internal {
@ -43,28 +44,6 @@ class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver();
const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand);
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start, LOperand* marker_operand);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
int next_visited_id_;
};
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@ -80,10 +59,24 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1) {
osr_pc_offset_(-1),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
Immediate ToImmediate(LOperand* op);
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@ -129,7 +122,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@ -191,11 +183,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
Immediate ToImmediate(LOperand* op);
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);

461
deps/v8/src/ia32/lithium-gap-resolver-ia32.cc

@ -0,0 +1,461 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"
namespace v8 {
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32), spilled_register_(-1) {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
source_uses_[i] = 0;
destination_uses_[i] = 0;
}
}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(HasBeenReset());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
PerformMove(i);
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
Finish();
ASSERT(HasBeenReset());
}
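// Note on the two passes above (a summary, not a behavior change): a
// constant source never satisfies Blocks(), so constant moves cannot
// participate in a dependency or a cycle; emitting them last keeps their
// destination registers available as scratch while the first pass
// untangles the register and stack moves.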
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) AddMove(move);
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph. We use operand swaps to resolve cycles,
// which means that a call to PerformMove could change any source operand
// in the move graph.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does
// not miss any). Assume there is a non-blocking move with source A
// and this move is blocked on source B and there is a swap of A and
// B. Then A and B must be involved in the same cycle (or they would
// not be swapped). Since this move's destination is B and there is
// only a single incoming edge to an operand, this move must also be
// involved in the same cycle. In that case, the blocking move will
// be created but will be "pending" when we return from PerformMove.
PerformMove(i);
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// This move's source may have changed due to swaps to resolve cycles and
// so it may now be the last move in the cycle. If so remove it.
if (moves_[index].source()->Equals(destination)) {
RemoveMove(index);
return;
}
// The move may be blocked on at most one pending move, in which case
// we have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
EmitSwap(index);
return;
}
}
// This move is not blocked.
EmitMove(index);
}
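For readers new to this scheme, a minimal standalone sketch of the same pending-marking algorithm over plain integer "registers" follows. It is illustrative only: it uses none of V8's types, the printf calls stand in for emitted instructions, and cycles are broken with a swap exactly as EmitSwap() does below.

#include <cstdio>
#include <vector>

struct Move { int src, dst; bool pending, done; };

static void Perform(std::vector<Move>& moves, int index) {
  Move& m = moves[index];
  const int n = static_cast<int>(moves.size());
  if (m.src == m.dst) { m.done = true; return; }  // Redundant; nothing to do.
  m.pending = true;
  // Depth-first: perform every non-pending move whose source is our
  // destination, since it must read that location before we clobber it.
  for (int i = 0; i < n; ++i) {
    if (i != index && !moves[i].done && !moves[i].pending &&
        moves[i].src == m.dst) {
      Perform(moves, i);
    }
  }
  m.pending = false;
  // A deeper swap may have rewritten our source; if the move is now
  // trivial, it was the final link of a cycle.
  if (m.src == m.dst) { m.done = true; return; }
  // Any blocker still left must be pending, i.e. an ancestor: a cycle.
  for (int i = 0; i < n; ++i) {
    if (i != index && !moves[i].done && moves[i].src == m.dst) {
      std::printf("xchg r%d, r%d\n", m.src, m.dst);
      // Remaining moves reading either swapped location must now read
      // the other one (cf. the fix-up loop at the end of EmitSwap).
      for (int j = 0; j < n; ++j) {
        if (j == index || moves[j].done) continue;
        if (moves[j].src == m.src) moves[j].src = m.dst;
        else if (moves[j].src == m.dst) moves[j].src = m.src;
      }
      m.done = true;
      return;
    }
  }
  std::printf("mov r%d <- r%d\n", m.dst, m.src);  // Not blocked: plain move.
  m.done = true;
}

int main() {
  // The cycle {r1 <- r0, r0 <- r1} plus an independent move r2 <- r0.
  std::vector<Move> moves;
  Move a = {0, 1, false, false}; moves.push_back(a);
  Move b = {1, 0, false, false}; moves.push_back(b);
  Move c = {0, 2, false, false}; moves.push_back(c);
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    if (!moves[i].done) Perform(moves, i);
  }
  return 0;  // Prints "mov r2 <- r0" and then "xchg r1, r0".
}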
void LGapResolver::AddMove(LMoveOperands move) {
LOperand* source = move.source();
if (source->IsRegister()) ++source_uses_[source->index()];
LOperand* destination = move.destination();
if (destination->IsRegister()) ++destination_uses_[destination->index()];
moves_.Add(move);
}
void LGapResolver::RemoveMove(int index) {
LOperand* source = moves_[index].source();
if (source->IsRegister()) {
--source_uses_[source->index()];
ASSERT(source_uses_[source->index()] >= 0);
}
LOperand* destination = moves_[index].destination();
if (destination->IsRegister()) {
--destination_uses_[destination->index()];
ASSERT(destination_uses_[destination->index()] >= 0);
}
moves_[index].Eliminate();
}
int LGapResolver::CountSourceUses(LOperand* operand) {
int count = 0;
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
++count;
}
}
return count;
}
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
return Register::FromAllocationIndex(i);
}
}
return no_reg;
}
bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (source_uses_[i] != 0) return false;
if (destination_uses_[i] != 0) return false;
}
return true;
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::Finish() {
if (spilled_register_ >= 0) {
__ pop(Register::FromAllocationIndex(spilled_register_));
spilled_register_ = -1;
}
moves_.Rewind(0);
}
void LGapResolver::EnsureRestored(LOperand* operand) {
if (operand->IsRegister() && operand->index() == spilled_register_) {
__ pop(Register::FromAllocationIndex(spilled_register_));
spilled_register_ = -1;
}
}
Register LGapResolver::EnsureTempRegister() {
// 1. We may have already spilled to create a temp register.
if (spilled_register_ >= 0) {
return Register::FromAllocationIndex(spilled_register_);
}
// 2. We may have a free register that we can use without spilling.
Register free = GetFreeRegisterNot(no_reg);
if (!free.is(no_reg)) return free;
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
Register scratch = Register::FromAllocationIndex(i);
__ push(scratch);
spilled_register_ = i;
return scratch;
}
}
// 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
Register scratch = Register::FromAllocationIndex(0);
__ push(scratch);
spilled_register_ = 0;
return scratch;
}
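Restated compactly, the policy above amounts to the selection function below (a sketch over plain use-count arrays, not V8 code; step 1, reusing an existing spill, is omitted, and n plays the role of Register::kNumAllocatableRegisters).

// Returns -1 when a register is free without spilling (step 2: it is no
// move's remaining source but is some move's destination, so it will be
// clobbered anyway), else the allocation index that should be pushed.
static int ChooseTempIndex(const int source_uses[],
                           const int destination_uses[], int n) {
  for (int i = 0; i < n; ++i)
    if (source_uses[i] == 0 && destination_uses[i] > 0) return -1;
  for (int i = 0; i < n; ++i)  // Step 3: unused either way; restore at Finish().
    if (source_uses[i] == 0 && destination_uses[i] == 0) return i;
  return 0;  // Step 4: any register will do once it has been pushed.
}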
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
EnsureRestored(source);
EnsureRestored(destination);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
ASSERT(destination->IsRegister() || destination->IsStackSlot());
Register src = cgen_->ToRegister(source);
Operand dst = cgen_->ToOperand(destination);
__ mov(dst, src);
} else if (source->IsStackSlot()) {
ASSERT(destination->IsRegister() || destination->IsStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
__ mov(dst, src);
} else {
// Spill on demand to use a temporary register for memory-to-memory
// moves.
Register tmp = EnsureTempRegister();
Operand dst = cgen_->ToOperand(destination);
__ mov(tmp, src);
__ mov(dst, tmp);
}
} else if (source->IsConstantOperand()) {
ASSERT(destination->IsRegister() || destination->IsStackSlot());
Immediate src = cgen_->ToImmediate(source);
Operand dst = cgen_->ToOperand(destination);
__ mov(dst, src);
} else if (source->IsDoubleRegister()) {
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
XMMRegister src = cgen_->ToDoubleRegister(source);
Operand dst = cgen_->ToOperand(destination);
__ movdbl(dst, src);
} else if (source->IsDoubleStackSlot()) {
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
__ movdbl(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
__ movdbl(xmm0, src);
__ movdbl(dst, xmm0);
}
} else {
UNREACHABLE();
}
RemoveMove(index);
}
void LGapResolver::EmitSwap(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
EnsureRestored(source);
EnsureRestored(destination);
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister() && destination->IsRegister()) {
// Register-register.
Register src = cgen_->ToRegister(source);
Register dst = cgen_->ToRegister(destination);
__ xchg(dst, src);
} else if ((source->IsRegister() && destination->IsStackSlot()) ||
(source->IsStackSlot() && destination->IsRegister())) {
// Register-memory. Use a free register as a temp if possible. Do not
// spill on demand because the simple spill implementation cannot avoid
// spilling src at this point.
Register tmp = GetFreeRegisterNot(no_reg);
Register reg =
cgen_->ToRegister(source->IsRegister() ? source : destination);
Operand mem =
cgen_->ToOperand(source->IsRegister() ? destination : source);
if (tmp.is(no_reg)) {
__ xor_(reg, mem);
__ xor_(mem, reg);
__ xor_(reg, mem);
} else {
__ mov(tmp, mem);
__ mov(mem, reg);
__ mov(reg, tmp);
}
} else if (source->IsStackSlot() && destination->IsStackSlot()) {
// Memory-memory. Spill on demand to use a temporary. If there is a
// free register after that, use it as a second temporary.
Register tmp0 = EnsureTempRegister();
Register tmp1 = GetFreeRegisterNot(tmp0);
Operand src = cgen_->ToOperand(source);
Operand dst = cgen_->ToOperand(destination);
if (tmp1.is(no_reg)) {
// Only one temp register available to us.
__ mov(tmp0, dst);
__ xor_(tmp0, src);
__ xor_(src, tmp0);
__ xor_(tmp0, src);
__ mov(dst, tmp0);
} else {
__ mov(tmp0, dst);
__ mov(tmp1, src);
__ mov(dst, tmp1);
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
// XMM register-register or register-memory. We rely on having xmm0
// available as a fixed scratch register.
ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
? source
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
__ movdbl(xmm0, other);
__ movdbl(other, reg);
__ movdbl(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
// Double-width memory-to-memory. Spill on demand to use a general
// purpose temporary register and also rely on having xmm0 available as
// a fixed scratch register.
Register tmp = EnsureTempRegister();
Operand src0 = cgen_->ToOperand(source);
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
__ movdbl(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
__ movdbl(src0, xmm0);
} else {
// No other combinations are possible.
UNREACHABLE();
}
// The swap of source and destination has executed a move from source to
// destination.
RemoveMove(index);
// Any unperformed (including pending) move with a source of either
// this move's source or destination needs to have their source
// changed to reflect the state of affairs after the swap.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(source)) {
moves_[i].set_source(destination);
} else if (other_move.Blocks(destination)) {
moves_[i].set_source(source);
}
}
// In addition to swapping the actual uses as sources, we need to update
// the use counts.
if (source->IsRegister() && destination->IsRegister()) {
int temp = source_uses_[source->index()];
source_uses_[source->index()] = source_uses_[destination->index()];
source_uses_[destination->index()] = temp;
} else if (source->IsRegister()) {
// We don't have use counts for non-register operands like destination.
// Compute those counts now.
source_uses_[source->index()] = CountSourceUses(source);
} else if (destination->IsRegister()) {
source_uses_[destination->index()] = CountSourceUses(destination);
}
}
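The temp-free exchanges above lean on the xor-swap identity; a standalone sanity check (plain C++, illustrative only):

#include <cassert>

int main() {
  int reg = 5, mem = 9;  // Stand-ins for a register and a stack slot.
  reg ^= mem;            // reg == 5 ^ 9
  mem ^= reg;            // mem == 9 ^ (5 ^ 9) == 5
  reg ^= mem;            // reg == (5 ^ 9) ^ 5 == 9
  assert(reg == 9 && mem == 5);
  return 0;
}

The identity is only safe because a swap's two operands are distinct locations; xor-ing a location with itself would zero it, but such a move would already have been dropped as redundant in BuildInitialMoveList.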
#undef __
} } // namespace v8::internal

110
deps/v8/src/ia32/lithium-gap-resolver-ia32.h

@ -0,0 +1,110 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
#include "v8.h"
#include "lithium-allocator.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// Emit any code necessary at the end of a gap move.
void Finish();
// Add or delete a move from the move graph without emitting any code.
// Used to build up the graph and remove trivial moves.
void AddMove(LMoveOperands move);
void RemoveMove(int index);
// Report the count of uses of operand as a source in a not-yet-performed
// move. Used to rebuild use counts.
int CountSourceUses(LOperand* operand);
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Execute a move by emitting a swap of two operands. The move from
// source to destination is removed from the move graph.
void EmitSwap(int index);
// Ensure that the given operand is not spilled.
void EnsureRestored(LOperand* operand);
// Return a register that can be used as a temp register, spilling
// something if necessary.
Register EnsureTempRegister();
// Return a known free register different from the given one (which could
// be no_reg, in which case any free register qualifies), or no_reg if
// there is no such register.
Register GetFreeRegisterNot(Register reg);
// Verify that the state is the initial one, ready to resolve a single
// parallel move.
bool HasBeenReset();
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
int source_uses_[Register::kNumAllocatableRegisters];
int destination_uses_[Register::kNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
int spilled_register_;
};
} } // namespace v8::internal
#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_

145
deps/v8/src/ia32/lithium-ia32.cc

@ -90,18 +90,22 @@ void LInstruction::PrintTo(StringStream* stream) {
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
for (int i = 0; i < I; i++) {
stream->Add(i == 0 ? "= " : " ");
inputs_.at(i)->PrintTo(stream);
}
stream->Add("= ");
inputs_.PrintOperandsTo(stream);
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
if (this->HasResult()) {
this->result()->PrintTo(stream);
stream->Add(" ");
results_.PrintOperandsTo(stream);
}
template<typename T, int N>
void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
for (int i = 0; i < N; i++) {
if (i > 0) stream->Add(" ");
elems_[i]->PrintTo(stream);
}
}
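// Net effect of the refactoring above: inputs and results are both printed
// through the shared OperandContainer::PrintOperandsTo() rather than
// per-method loops, so every LTemplateInstruction renders its operands
// uniformly.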
@ -172,22 +176,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
right()->PrintTo(stream);
InputAt(1)->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@ -195,35 +199,35 @@ void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@ -232,14 +236,14 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIs::PrintDataTo(StringStream* stream) {
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@ -253,7 +257,7 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
}
@ -286,14 +290,14 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
@ -316,7 +320,7 @@ int LChunk::GetNextSpillIndex(bool is_double) {
}
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index);
@ -570,6 +574,13 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value, new LUnallocated(LUnallocated::ANY));
}
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
if (value->EmitAtUses()) {
HInstruction* instr = HInstruction::cast(value);
@ -883,8 +894,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
if (current->IsBranch()) {
instr->set_hydrogen_value(HBranch::cast(current)->value());
if (current->IsBranch() && !instr->IsGoto()) {
// TODO(fschneider): Handle branch instructions uniformly like
// other instructions. This requires us to generate the right
// branch instruction already at the HIR level.
ASSERT(instr->IsControl());
HBranch* branch = HBranch::cast(current);
instr->set_hydrogen_value(branch->value());
HBasicBlock* first = branch->FirstSuccessor();
HBasicBlock* second = branch->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
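// Summary of the branch handling above: the true/false target ids are now
// wired up in this one place, which is why the various *AndBranch
// constructors in DoBranch() below no longer take first_id/second_id
// arguments.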
@ -921,11 +941,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
} else {
op = UseOrConstant(value);
if (op->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(op);
unalloc->set_policy(LUnallocated::ANY);
}
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
@ -945,12 +961,6 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
HBasicBlock* first = instr->FirstSuccessor();
HBasicBlock* second = instr->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
int first_id = first->block_id();
int second_id = second->block_id();
if (v->EmitAtUses()) {
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
@ -958,9 +968,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister(),
TempRegister(),
first_id,
second_id);
TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
Token::Value op = compare->token();
@ -972,17 +980,13 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right),
first_id,
second_id);
UseOrConstantAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right),
first_id,
second_id);
UseRegisterAtStart(right));
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
@ -990,32 +994,26 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
right_operand,
first_id,
second_id);
right_operand);
return MarkAsCall(result, instr);
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(compare->value()),
first_id,
second_id);
return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
TempRegister(),
first_id,
second_id);
TempRegister());
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(compare->value()), first_id, second_id);
UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@ -1023,9 +1021,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
// We only need a temp register for non-strict compare.
LOperand* temp = compare->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
temp,
first_id,
second_id);
temp);
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
@ -1034,42 +1030,34 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2,
first_id,
second_id);
temp2);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
UseRegisterAtStart(compare->right()),
first_id,
second_id);
UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstanceOfAndBranch* result =
new LInstanceOfAndBranch(
UseFixed(instance_of->left(), InstanceofStub::left()),
UseFixed(instance_of->right(), InstanceofStub::right()),
first_id,
second_id);
UseFixed(instance_of->right(), InstanceofStub::right()));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
first_id,
second_id);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
return new LGoto(first_id);
return new LGoto(instr->FirstSuccessor()->block_id());
} else if (HConstant::cast(v)->handle()->IsFalse()) {
return new LGoto(second_id);
return new LGoto(instr->SecondSuccessor()->block_id());
}
}
Abort("Undefined compare before branch");
return NULL;
}
}
return new LBranch(UseRegisterAtStart(v), first_id, second_id);
return new LBranch(UseRegisterAtStart(v));
}
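// Illustrative case for the constant handling above: when the branch
// condition is an HConstant known to be true or false, no compare is
// materialized at all; the builder emits an unconditional LGoto straight
// to the corresponding successor block.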
@ -1178,8 +1166,8 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
UseFixed(instr->key(), ecx);
return MarkAsCall(DefineFixed(new LCallKeyed, eax), instr);
LOperand* key = UseFixed(instr->key(), ecx);
return MarkAsCall(DefineFixed(new LCallKeyed(key), eax), instr);
}
@ -1266,10 +1254,11 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
} else if (instr->representation().IsInteger32()) {
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
FixedTemp(edx);
LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
return AssignEnvironment(DefineFixed(new LDivI(value, divisor), eax));
LDivI* result = new LDivI(value, divisor, temp);
return AssignEnvironment(DefineFixed(result, eax));
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::DIV, instr);
@ -1283,10 +1272,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
ASSERT(instr->right()->representation().IsInteger32());
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
FixedTemp(edx);
LOperand* temp = FixedTemp(edx);
LOperand* value = UseFixed(instr->left(), eax);
LOperand* divisor = UseRegister(instr->right());
LModI* mod = new LModI(value, divisor);
LModI* mod = new LModI(value, divisor, temp);
LInstruction* result = DefineFixed(mod, edx);
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero))
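// Background for the FixedTemp(edx) pattern above (standard x86 division
// semantics, not something introduced by this change): 32-bit idiv reads
// its dividend from edx:eax and writes the quotient to eax and the
// remainder to edx, roughly
//   cdq            ; sign-extend eax into edx
//   idiv divisor   ; eax = quotient, edx = remainder
// so the divisor (or any other live value) must never be allocated to
// edx. Claiming edx as a fixed temp expresses that to the allocator.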

1064
deps/v8/src/ia32/lithium-ia32.h

File diff suppressed because it is too large

49
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1362,11 +1362,10 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
kind_);
Object* obj;
{ MaybeObject* maybe_obj =
StubCache::ComputeCallMiss(arguments().immediate(), kind_);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
return obj;
}
@ -1685,9 +1684,15 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
GenerateNameCheck(name, &miss);
if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
index_out_of_range_label = &miss;
}
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@ -1715,7 +1720,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@ -1723,11 +1728,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::nan_value()));
__ ret((argc + 1) * kPointerSize);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::nan_value()));
__ ret((argc + 1) * kPointerSize);
}
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
__ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@ -1758,9 +1768,15 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
GenerateNameCheck(name, &miss);
if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
index_out_of_range_label = &miss;
}
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@ -1790,7 +1806,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@ -1798,11 +1814,16 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::empty_string()));
__ ret((argc + 1) * kPointerSize);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::empty_string()));
__ ret((argc + 1) * kPointerSize);
}
__ bind(&miss);
// Restore function name in ecx.
__ Set(ecx, Immediate(Handle<String>(name)));
__ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;

309
deps/v8/src/ic.cc

@ -154,24 +154,20 @@ static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
}
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC || !name->IsString()) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
// The stub was generated for JSObject but called for non-JSObject.
// IC::GetCodeCacheHolder is not applicable.
return MONOMORPHIC;
return false;
} else if (cache_holder == PROTOTYPE_MAP &&
receiver->GetPrototype()->IsNull()) {
// IC::GetCodeCacheHolder is not applicable.
return MONOMORPHIC;
return false;
}
Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
@ -185,20 +181,37 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
// to prototype check failure.
int index = map->IndexInCodeCache(name, target);
if (index >= 0) {
// For keyed load/store/call, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
// prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
if (kind == Code::KEYED_LOAD_IC ||
kind == Code::KEYED_STORE_IC ||
kind == Code::KEYED_CALL_IC) {
return MONOMORPHIC;
}
// Remove the target from the code cache to avoid hitting the same
// invalid stub again.
map->RemoveFromCodeCache(String::cast(name), target, index);
return true;
}
return false;
}
IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
IC::State state = target->ic_state();
if (state != MONOMORPHIC || !name->IsString()) return state;
if (receiver->IsUndefined() || receiver->IsNull()) return state;
// For keyed load/store/call, the most likely cause of cache failure is
// that the key has changed. We do not distinguish between
// prototype and non-prototype failures for keyed access.
Code::Kind kind = target->kind();
if (kind == Code::KEYED_LOAD_IC ||
kind == Code::KEYED_STORE_IC ||
kind == Code::KEYED_CALL_IC) {
return MONOMORPHIC;
}
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
// again.
// Call stubs handle this later to allow extra IC state
// transitions.
if (kind != Code::CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) {
return MONOMORPHIC_PROTOTYPE_FAILURE;
}
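// Illustrative JS-level scenario for the prototype-failure branch above
// (hypothetical example, not a test from this commit):
//   function f(o) { return o.x; }       // LoadIC for property 'x'
//   f(a); f(a);                         // the IC goes monomorphic on a's map
//   delete Object.getPrototypeOf(a).x;  // the prototype chain changes
//   f(a);                               // the cached stub's map checks fail;
//                                       // the stub is evicted and StateFrom()
//                                       // reports MONOMORPHIC_PROTOTYPE_FAILURE.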
@ -482,6 +495,7 @@ void CallICBase::ReceiverToObject(Handle<Object> object) {
MaybeObject* CallICBase::LoadFunction(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// If the object is undefined or null it's illegal to try to get any
@ -527,7 +541,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
// Lookup is valid: Update inline cache and stub cache.
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, object, name);
UpdateCaches(&lookup, state, extra_ic_state, object, name);
}
// Get the property.
@ -576,8 +590,142 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
Handle<Object> object,
Code::ExtraICState* extra_ic_state) {
ASSERT(kind_ == Code::CALL_IC);
if (lookup->type() != CONSTANT_FUNCTION) return false;
JSFunction* function = lookup->GetConstantFunction();
if (!function->shared()->HasBuiltinFunctionId()) return false;
// Fetch the arguments passed to the called function.
const int argc = target()->arguments_count();
Address entry = Top::c_entry_fp(Top::GetCurrentThread());
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
Arguments args(argc + 1,
&Memory::Object_at(fp +
StandardFrameConstants::kCallerSPOffset +
argc * kPointerSize));
switch (function->shared()->builtin_function_id()) {
case kStringCharCodeAt:
case kStringCharAt:
if (object->IsString()) {
String* string = String::cast(*object);
// Check that there's the right wrapper in the receiver slot.
ASSERT(string == JSValue::cast(args[0])->value());
// If we're in the default (fastest) state and the index is
// out of bounds, update the state to record this fact.
if (*extra_ic_state == DEFAULT_STRING_STUB &&
argc >= 1 && args[1]->IsNumber()) {
double index;
if (args[1]->IsSmi()) {
index = Smi::cast(args[1])->value();
} else {
ASSERT(args[1]->IsHeapNumber());
index = DoubleToInteger(HeapNumber::cast(args[1])->value());
}
if (index < 0 || index >= string->length()) {
*extra_ic_state = STRING_INDEX_OUT_OF_BOUNDS;
return true;
}
}
}
break;
default:
return false;
}
return false;
}
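// Illustrative trigger for the transition above (hypothetical call, not a
// test from this commit): with the default (fast) string stub installed,
//   'abc'.charAt(42)
// reaches the miss handler, the index is found to be >= the string
// length, and extra_ic_state moves from DEFAULT_STRING_STUB to
// STRING_INDEX_OUT_OF_BOUNDS. The stub compiled next binds the
// out-of-range label and returns '' (charAt) or NaN (charCodeAt) inline
// instead of missing again (see the CompileStringCharAtCall changes in
// stub-cache-ia32.cc above).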
MaybeObject* CallICBase::ComputeMonomorphicStub(
LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
MaybeObject* maybe_code = NULL;
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
maybe_code = StubCache::ComputeCallField(argc,
in_loop,
kind_,
*name,
*object,
lookup->holder(),
index);
break;
}
case CONSTANT_FUNCTION: {
// Get the constant function and compute the code stub for this
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
maybe_code = StubCache::ComputeCallConstant(argc,
in_loop,
kind_,
extra_ic_state,
*name,
*object,
lookup->holder(),
function);
break;
}
case NORMAL: {
if (!object->IsJSObject()) return NULL;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (lookup->holder()->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(lookup->holder());
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
if (!cell->value()->IsJSFunction()) return NULL;
JSFunction* function = JSFunction::cast(cell->value());
maybe_code = StubCache::ComputeCallGlobal(argc,
in_loop,
kind_,
*name,
*receiver,
global,
cell,
function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return NULL;
maybe_code = StubCache::ComputeCallNormal(argc,
in_loop,
kind_,
*name,
*receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
maybe_code = StubCache::ComputeCallInterceptor(argc,
kind_,
*name,
*object,
lookup->holder());
break;
}
default:
maybe_code = NULL;
break;
}
return maybe_code;
}
void CallICBase::UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
// Bail out if we didn't find a result.
@ -594,90 +742,44 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
MaybeObject* maybe_code = NULL;
Object* code;
bool had_proto_failure = false;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
maybe_code = StubCache::ComputeCallPreMonomorphic(argc, in_loop, kind_);
} else if (state == MONOMORPHIC) {
maybe_code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
} else {
// Compute monomorphic stub.
switch (lookup->type()) {
case FIELD: {
int index = lookup->GetFieldIndex();
maybe_code = StubCache::ComputeCallField(argc,
in_loop,
kind_,
*name,
*object,
lookup->holder(),
index);
break;
}
case CONSTANT_FUNCTION: {
// Get the constant function and compute the code stub for this
// call; used for rewriting to monomorphic state and making sure
// that the code stub is in the stub cache.
JSFunction* function = lookup->GetConstantFunction();
maybe_code = StubCache::ComputeCallConstant(argc,
in_loop,
kind_,
*name,
*object,
lookup->holder(),
function);
break;
}
case NORMAL: {
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (lookup->holder()->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(lookup->holder());
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
if (!cell->value()->IsJSFunction()) return;
JSFunction* function = JSFunction::cast(cell->value());
maybe_code = StubCache::ComputeCallGlobal(argc,
in_loop,
kind_,
*name,
*receiver,
global,
cell,
function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
maybe_code = StubCache::ComputeCallNormal(argc,
in_loop,
kind_,
*name,
*receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
maybe_code = StubCache::ComputeCallInterceptor(argc,
kind_,
*name,
*object,
lookup->holder());
break;
}
default:
return;
if (kind_ == Code::CALL_IC &&
TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
maybe_code = ComputeMonomorphicStub(lookup,
state,
extra_ic_state,
object,
name);
} else if (kind_ == Code::CALL_IC &&
TryRemoveInvalidPrototypeDependentStub(target(),
*object,
*name)) {
had_proto_failure = true;
maybe_code = ComputeMonomorphicStub(lookup,
state,
extra_ic_state,
object,
name);
} else {
maybe_code = StubCache::ComputeCallMegamorphic(argc, in_loop, kind_);
}
} else {
maybe_code = ComputeMonomorphicStub(lookup,
state,
extra_ic_state,
object,
name);
}
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
Object* code;
if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
// Patch the call site depending on the state of the cache.
@ -696,7 +798,9 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
StubCache::Set(*name, map, Code::cast(code));
}
USE(had_proto_failure);
#ifdef DEBUG
if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
name, state, target(), in_loop ? " (in-loop)" : "");
#endif
@ -707,7 +811,10 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
Handle<Object> object,
Handle<Object> key) {
if (key->IsSymbol()) {
return CallICBase::LoadFunction(state, object, Handle<String>::cast(key));
return CallICBase::LoadFunction(state,
Code::kNoExtraICState,
object,
Handle<String>::cast(key));
}
if (object->IsUndefined() || object->IsNull()) {
@ -1641,11 +1748,13 @@ MUST_USE_RESULT MaybeObject* CallIC_Miss(Arguments args) {
ASSERT(args.length() == 2);
CallIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
MaybeObject* maybe_result = ic.LoadFunction(state,
extra_ic_state,
args.at<Object>(0),
args.at<String>(1));
Object* result;
{ MaybeObject* maybe_result =
ic.LoadFunction(state, args.at<Object>(0), args.at<String>(1));
if (!maybe_result->ToObject(&result)) return maybe_result;
}
if (!maybe_result->ToObject(&result)) return maybe_result;
// The first time the inline cache is updated may be the first time the
// function it references gets called. If the function was lazily compiled

13
deps/v8/src/ic.h

@ -193,16 +193,29 @@ class CallICBase: public IC {
public:
MUST_USE_RESULT MaybeObject* LoadFunction(State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
protected:
Code::Kind kind_;
bool TryUpdateExtraICState(LookupResult* lookup,
Handle<Object> object,
Code::ExtraICState* extra_ic_state);
MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name);

63
deps/v8/src/inspector.cc

@ -0,0 +1,63 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "inspector.h"
namespace v8 {
namespace internal {
#ifdef INSPECTOR
//============================================================================
// The Inspector.
void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
// Dump the object pointer.
OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
if (obj->IsHeapObject()) {
HeapObject *hobj = HeapObject::cast(obj);
OS::FPrint(out, " size %d :", hobj->Size());
}
// Dump each object classification that matches this object.
#define FOR_EACH_TYPE(type) \
if (obj->Is##type()) { \
OS::FPrint(out, " %s", #type); \
}
OBJECT_TYPE_LIST(FOR_EACH_TYPE)
HEAP_OBJECT_TYPE_LIST(FOR_EACH_TYPE)
#undef FOR_EACH_TYPE
}
#endif // INSPECTOR
} } // namespace v8::internal
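DumpObjectType leans on the FOR_EACH_TYPE X-macro so the type list is written once and expanded many times. A self-contained illustration of the same pattern, with a made-up three-entry list and tag-based predicates standing in for V8's:

#include <cstdio>

#define EXAMPLE_TYPE_LIST(V) \
  V(Smi)                     \
  V(String)                  \
  V(JSObject)

struct Obj { int kind; };
static bool IsSmi(const Obj& o) { return o.kind == 0; }
static bool IsString(const Obj& o) { return o.kind == 1; }
static bool IsJSObject(const Obj& o) { return o.kind == 2; }

// Same shape as FOR_EACH_TYPE above: print every classification that matches.
static void DumpType(const Obj& o) {
#define FOR_EACH_TYPE(type) \
  if (Is##type(o)) std::printf(" %s", #type);
  EXAMPLE_TYPE_LIST(FOR_EACH_TYPE)
#undef FOR_EACH_TYPE
  std::printf("\n");
}

int main() {
  Obj o = { 1 };
  DumpType(o);  // prints " String"
  return 0;
}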

62
deps/v8/src/inspector.h

@ -0,0 +1,62 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_INSPECTOR_H_
#define V8_INSPECTOR_H_
// Only build this code if we're configured with the INSPECTOR.
#ifdef INSPECTOR
#include "v8.h"
#include "objects.h"
namespace v8 {
namespace internal {
class Inspector {
public:
static void DumpObjectType(FILE* out, Object *obj, bool print_more);
static void DumpObjectType(FILE* out, Object *obj) {
DumpObjectType(out, obj, false);
}
static void DumpObjectType(Object *obj, bool print_more) {
DumpObjectType(stdout, obj, print_more);
}
static void DumpObjectType(Object *obj) {
DumpObjectType(stdout, obj, false);
}
};
} } // namespace v8::internal
#endif // INSPECTOR
#endif // V8_INSPECTOR_H_

12
deps/v8/src/lithium-allocator.cc

@ -745,10 +745,10 @@ void LAllocator::AddConstraintsGapMove(int index,
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands cur = move_operands->at(i);
LOperand* cur_to = cur.to();
LOperand* cur_to = cur.destination();
if (cur_to->IsUnallocated()) {
if (cur_to->VirtualRegister() == from->VirtualRegister()) {
move->AddMove(cur.from(), to);
move->AddMove(cur.source(), to);
return;
}
}
@ -896,8 +896,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
if (cur->IsIgnored()) continue;
LOperand* from = cur->from();
LOperand* to = cur->to();
LOperand* from = cur->source();
LOperand* to = cur->destination();
HPhi* phi = LookupPhi(to);
LOperand* hint = to;
if (phi != NULL) {
@ -1217,9 +1217,9 @@ void LAllocator::BuildLiveRanges() {
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
for (int j = 0; j < move->move_operands()->length(); ++j) {
LOperand* to = move->move_operands()->at(j).to();
LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
hint = move->move_operands()->at(j).from();
hint = move->move_operands()->at(j).source();
phi_operand = to;
break;
}

48
deps/v8/src/lithium-allocator.h

@ -321,27 +321,49 @@ class LUnallocated: public LOperand {
class LMoveOperands BASE_EMBEDDED {
public:
LMoveOperands(LOperand* from, LOperand* to) : from_(from), to_(to) { }
LMoveOperands(LOperand* source, LOperand* destination)
: source_(source), destination_(destination) {
}
LOperand* source() const { return source_; }
void set_source(LOperand* operand) { source_ = operand; }
LOperand* destination() const { return destination_; }
void set_destination(LOperand* operand) { destination_ = operand; }
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const {
return destination_ == NULL && source_ != NULL;
}
LOperand* from() const { return from_; }
LOperand* to() const { return to_; }
// True if this move reads the given operand, so the operand cannot be
// overwritten until this move has been performed.
bool Blocks(LOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
}
// A move is redundant if it's been eliminated, if its source and
// destination are the same, or if its destination is unneeded.
bool IsRedundant() const {
return IsEliminated() || from_->Equals(to_) || IsIgnored();
return IsEliminated() || source_->Equals(destination_) || IsIgnored();
}
bool IsEliminated() const { return from_ == NULL; }
bool IsIgnored() const {
if (to_ != NULL && to_->IsUnallocated() &&
LUnallocated::cast(to_)->HasIgnorePolicy()) {
return true;
}
return false;
return destination_ != NULL &&
destination_->IsUnallocated() &&
LUnallocated::cast(destination_)->HasIgnorePolicy();
}
void Eliminate() { from_ = to_ = NULL; }
// We clear both operands to indicate a move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
bool IsEliminated() const {
ASSERT(source_ != NULL || destination_ == NULL);
return source_ == NULL;
}
private:
LOperand* from_;
LOperand* to_;
LOperand* source_;
LOperand* destination_;
};
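The renamed fields come with two observers, IsPending and Blocks, that a gap resolver needs when untangling cyclic parallel moves. A minimal sketch of the three states a move can be in (illustrative types, not the real allocator):

#include <cassert>
#include <cstddef>

struct Operand {
  int id;
  bool Equals(const Operand* other) const { return id == other->id; }
};

class MoveOperands {
 public:
  MoveOperands(Operand* source, Operand* destination)
      : source_(source), destination_(destination) {}
  // In progress: the resolver cleared the destination but kept the source.
  bool IsPending() const { return destination_ == NULL && source_ != NULL; }
  // Done: both operands cleared.
  bool IsEliminated() const { return source_ == NULL; }
  // This move reads the operand, so the operand cannot be clobbered yet.
  bool Blocks(Operand* operand) const {
    return !IsEliminated() && source_->Equals(operand);
  }
  void MarkPending() { destination_ = NULL; }
  void Eliminate() { source_ = destination_ = NULL; }
 private:
  Operand* source_;
  Operand* destination_;
};

int main() {
  Operand a = { 1 }, b = { 2 };
  MoveOperands move(&a, &b);   // represents b = a
  assert(move.Blocks(&a));     // a must be read before it is overwritten
  move.MarkPending();
  assert(move.IsPending());
  move.Eliminate();
  assert(move.IsEliminated());
  return 0;
}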

19
deps/v8/src/lithium.cc

@ -39,18 +39,21 @@ bool LParallelMove::IsRedundant() const {
void LParallelMove::PrintDataTo(StringStream* stream) const {
for (int i = move_operands_.length() - 1; i >= 0; --i) {
bool first = true;
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsEliminated()) {
LOperand* from = move_operands_[i].from();
LOperand* to = move_operands_[i].to();
if (from->Equals(to)) {
to->PrintTo(stream);
LOperand* source = move_operands_[i].source();
LOperand* destination = move_operands_[i].destination();
if (!first) stream->Add(" ");
first = false;
if (source->Equals(destination)) {
destination->PrintTo(stream);
} else {
to->PrintTo(stream);
destination->PrintTo(stream);
stream->Add(" = ");
from->PrintTo(stream);
source->PrintTo(stream);
}
stream->Add("; ");
stream->Add(";");
}
}
}

3
deps/v8/src/lithium.h

@ -35,9 +35,6 @@
namespace v8 {
namespace internal {
class LCodeGen;
class Translation;
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }

12
deps/v8/src/mark-compact.cc

@ -30,6 +30,7 @@
#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
@ -125,6 +126,12 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
if (!Heap::map_space()->MapPointersEncodable())
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If the GDBJIT interface is active, disable compaction.
compacting_collection_ = false;
}
#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
@ -2906,6 +2913,11 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_GDB_JIT_INTERFACE
if (obj->IsCode()) {
GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
}
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
PROFILE(CodeDeleteEvent(obj->address()));

13
deps/v8/src/objects-debug.cc

@ -670,16 +670,17 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() {
int finger = Smi::cast(get(kFingerIndex))->value();
ASSERT(kEntriesIndex <= finger);
ASSERT(finger < size || finger == kEntriesIndex);
ASSERT((finger < size) || (finger == kEntriesIndex && finger == size));
ASSERT_EQ(0, finger % kEntrySize);
if (FLAG_enable_slow_asserts) {
STATIC_ASSERT(2 == kEntrySize);
for (int i = kEntriesIndex; i < length(); i += kEntrySize) {
for (int i = kEntriesIndex; i < size; i++) {
ASSERT(!get(i)->IsTheHole());
get(i)->Verify();
}
for (int i = size; i < length(); i++) {
ASSERT(get(i)->IsTheHole());
get(i)->Verify();
get(i + 1)->Verify();
// Key and value must either both be holes or both be present.
ASSERT(get(i)->IsTheHole() == get(i + 1)->IsTheHole());
}
}
}
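The tightened verifier encodes three invariants: the finger is entry-aligned, live slots are never holes, and the key/value slots beyond size are holed in pairs. A standalone restatement of those checks (holes modeled as -1, layout constants copied from the class):

#include <cassert>
#include <vector>

static const int kEntriesIndex = 2;  // slots 0-1 hold finger and size
static const int kEntrySize = 2;     // each entry is a (key, value) pair
static const int kHole = -1;

static void VerifyCache(const std::vector<int>& cache, int finger, int size) {
  assert(kEntriesIndex <= finger);
  assert(finger < size || (finger == kEntriesIndex && finger == size));
  assert(finger % kEntrySize == 0);
  for (int i = kEntriesIndex; i < size; i++) {
    assert(cache[i] != kHole);  // live slots are never holes
  }
  for (int i = size; i < static_cast<int>(cache.size()); i += kEntrySize) {
    // Key and value must either both be holes or both be present.
    assert((cache[i] == kHole) == (cache[i + 1] == kHole));
  }
}

int main() {
  std::vector<int> cache;
  cache.push_back(0); cache.push_back(0);    // header slots
  cache.push_back(7); cache.push_back(49);   // one live entry
  cache.push_back(kHole); cache.push_back(kHole);
  VerifyCache(cache, /* finger */ 2, /* size */ 4);
  return 0;
}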

49
deps/v8/src/objects-inl.h

@ -1978,13 +1978,13 @@ void ExternalTwoByteString::set_resource(
void JSFunctionResultCache::MakeZeroSize() {
set(kFingerIndex, Smi::FromInt(kEntriesIndex));
set(kCacheSizeIndex, Smi::FromInt(kEntriesIndex));
set_finger_index(kEntriesIndex);
set_size(kEntriesIndex);
}
void JSFunctionResultCache::Clear() {
int cache_size = Smi::cast(get(kCacheSizeIndex))->value();
int cache_size = size();
Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
MemsetPointer(entries_start,
Heap::the_hole_value(),
@ -1993,6 +1993,26 @@ void JSFunctionResultCache::Clear() {
}
int JSFunctionResultCache::size() {
return Smi::cast(get(kCacheSizeIndex))->value();
}
void JSFunctionResultCache::set_size(int size) {
set(kCacheSizeIndex, Smi::FromInt(size));
}
int JSFunctionResultCache::finger_index() {
return Smi::cast(get(kFingerIndex))->value();
}
void JSFunctionResultCache::set_finger_index(int finger_index) {
set(kFingerIndex, Smi::FromInt(finger_index));
}
byte ByteArray::get(int index) {
ASSERT(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@ -2396,6 +2416,12 @@ InlineCacheState Code::ic_state() {
}
Code::ExtraICState Code::extra_ic_state() {
ASSERT(is_inline_cache_stub());
return ExtractExtraICStateFromFlags(flags());
}
PropertyType Code::type() {
ASSERT(ic_state() == MONOMORPHIC);
return ExtractTypeFromFlags(flags());
@ -2572,14 +2598,20 @@ bool Code::is_inline_cache_stub() {
Code::Flags Code::ComputeFlags(Kind kind,
InLoopFlag in_loop,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
PropertyType type,
int argc,
InlineCacheHolderFlag holder) {
// Extra IC state is only allowed for monomorphic call IC stubs.
ASSERT(extra_ic_state == kNoExtraICState ||
(kind == CALL_IC && (ic_state == MONOMORPHIC ||
ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)));
// Compute the bit mask.
int bits = kind << kFlagsKindShift;
if (in_loop) bits |= kFlagsICInLoopMask;
bits |= ic_state << kFlagsICStateShift;
bits |= type << kFlagsTypeShift;
bits |= extra_ic_state << kFlagsExtraICStateShift;
bits |= argc << kFlagsArgumentsCountShift;
if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
// Cast to flags and validate result before returning it.
@ -2588,6 +2620,7 @@ Code::Flags Code::ComputeFlags(Kind kind,
ASSERT(ExtractICStateFromFlags(result) == ic_state);
ASSERT(ExtractICInLoopFromFlags(result) == in_loop);
ASSERT(ExtractTypeFromFlags(result) == type);
ASSERT(ExtractExtraICStateFromFlags(result) == extra_ic_state);
ASSERT(ExtractArgumentsCountFromFlags(result) == argc);
return result;
}
@ -2595,10 +2628,12 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
PropertyType type,
ExtraICState extra_ic_state,
InlineCacheHolderFlag holder,
InLoopFlag in_loop,
int argc) {
return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc, holder);
return ComputeFlags(
kind, in_loop, MONOMORPHIC, extra_ic_state, type, argc, holder);
}
@ -2614,6 +2649,12 @@ InlineCacheState Code::ExtractICStateFromFlags(Flags flags) {
}
Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) {
int bits = (flags & kFlagsExtraICStateMask) >> kFlagsExtraICStateShift;
return static_cast<ExtraICState>(bits);
}
InLoopFlag Code::ExtractICInLoopFromFlags(Flags flags) {
int bits = (flags & kFlagsICInLoopMask);
return bits != 0 ? IN_LOOP : NOT_IN_LOOP;

162
deps/v8/src/objects.h

@ -624,6 +624,71 @@ class MaybeObject BASE_EMBEDDED {
#endif
};
#define OBJECT_TYPE_LIST(V) \
V(Smi) \
V(HeapObject) \
V(Number) \
#define HEAP_OBJECT_TYPE_LIST(V) \
V(HeapNumber) \
V(String) \
V(Symbol) \
V(SeqString) \
V(ExternalString) \
V(ConsString) \
V(ExternalTwoByteString) \
V(ExternalAsciiString) \
V(SeqTwoByteString) \
V(SeqAsciiString) \
\
V(PixelArray) \
V(ExternalArray) \
V(ExternalByteArray) \
V(ExternalUnsignedByteArray) \
V(ExternalShortArray) \
V(ExternalUnsignedShortArray) \
V(ExternalIntArray) \
V(ExternalUnsignedIntArray) \
V(ExternalFloatArray) \
V(ByteArray) \
V(JSObject) \
V(JSContextExtensionObject) \
V(Map) \
V(DescriptorArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(FixedArray) \
V(Context) \
V(CatchContext) \
V(GlobalContext) \
V(JSFunction) \
V(Code) \
V(Oddball) \
V(SharedFunctionInfo) \
V(JSValue) \
V(StringWrapper) \
V(Proxy) \
V(Boolean) \
V(JSArray) \
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
V(SymbolTable) \
V(JSFunctionResultCache) \
V(NormalizedMapCache) \
V(CompilationCacheTable) \
V(CodeCacheHashTable) \
V(MapCache) \
V(Primitive) \
V(GlobalObject) \
V(JSGlobalObject) \
V(JSBuiltinsObject) \
V(JSGlobalProxy) \
V(UndetectableObject) \
V(AccessCheckNeeded) \
V(JSGlobalPropertyCell) \
// Object is the abstract superclass for all classes in the
// object hierarchy.
// Object does not use any virtual functions to avoid the
@ -633,67 +698,10 @@ class MaybeObject BASE_EMBEDDED {
class Object : public MaybeObject {
public:
// Type testing.
inline bool IsSmi();
inline bool IsHeapObject();
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
// See objects-inl.h for more details
inline bool IsSeqString();
inline bool IsExternalString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
inline bool IsConsString();
inline bool IsNumber();
inline bool IsByteArray();
inline bool IsPixelArray();
inline bool IsExternalArray();
inline bool IsExternalByteArray();
inline bool IsExternalUnsignedByteArray();
inline bool IsExternalShortArray();
inline bool IsExternalUnsignedShortArray();
inline bool IsExternalIntArray();
inline bool IsExternalUnsignedIntArray();
inline bool IsExternalFloatArray();
inline bool IsJSObject();
inline bool IsJSContextExtensionObject();
inline bool IsMap();
inline bool IsFixedArray();
inline bool IsDescriptorArray();
inline bool IsDeoptimizationInputData();
inline bool IsDeoptimizationOutputData();
inline bool IsContext();
inline bool IsCatchContext();
inline bool IsGlobalContext();
inline bool IsJSFunction();
inline bool IsCode();
inline bool IsOddball();
inline bool IsSharedFunctionInfo();
inline bool IsJSValue();
inline bool IsStringWrapper();
inline bool IsProxy();
inline bool IsBoolean();
inline bool IsJSArray();
inline bool IsJSRegExp();
inline bool IsHashTable();
inline bool IsDictionary();
inline bool IsSymbolTable();
inline bool IsJSFunctionResultCache();
inline bool IsNormalizedMapCache();
inline bool IsCompilationCacheTable();
inline bool IsCodeCacheHashTable();
inline bool IsMapCache();
inline bool IsPrimitive();
inline bool IsGlobalObject();
inline bool IsJSGlobalObject();
inline bool IsJSBuiltinsObject();
inline bool IsJSGlobalProxy();
inline bool IsUndetectableObject();
inline bool IsAccessCheckNeeded();
inline bool IsJSGlobalPropertyCell();
#define IS_TYPE_FUNCTION_DECL(type_) inline bool Is##type_();
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
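Each V(type) entry expands through IS_TYPE_FUNCTION_DECL into one inline bool Is<type>() declaration, replacing the sixty-odd hand-written lines deleted above. A compilable miniature of the same trick, with two made-up types and tag-based definitions:

#include <cassert>

#define MINI_TYPE_LIST(V) \
  V(Smi)                  \
  V(HeapObject)

struct Object {
  int tag;  // 0 for Smi, 1 for HeapObject in this toy model
#define IS_TYPE_FUNCTION_DECL(type_) bool Is##type_() const;
  MINI_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
};

bool Object::IsSmi() const { return tag == 0; }
bool Object::IsHeapObject() const { return tag == 1; }

int main() {
  Object o = { 0 };
  assert(o.IsSmi() && !o.IsHeapObject());
  return 0;
}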
// Returns true if this object is an instance of the specified
// function template.
@ -2613,6 +2621,11 @@ class JSFunctionResultCache: public FixedArray {
inline void MakeZeroSize();
inline void Clear();
inline int size();
inline void set_size(int size);
inline int finger_index();
inline void set_finger_index(int finger_index);
// Casting
static inline JSFunctionResultCache* cast(Object* obj);
@ -3163,6 +3176,10 @@ class Code: public HeapObject {
NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
typedef int ExtraICState;
static const ExtraICState kNoExtraICState = 0;
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* Kind2String(Kind kind);
@ -3198,6 +3215,7 @@ class Code: public HeapObject {
// [flags]: Access to specific code flags.
inline Kind kind();
inline InlineCacheState ic_state(); // Only valid for IC stubs.
inline ExtraICState extra_ic_state(); // Only valid for IC stubs.
inline InLoopFlag ic_in_loop(); // Only valid for IC stubs.
inline PropertyType type(); // Only valid for monomorphic IC stubs.
inline int arguments_count(); // Only valid for call IC stubs.
@ -3282,22 +3300,26 @@ class Code: public HeapObject {
Map* FindFirstMap();
// Flags operations.
static inline Flags ComputeFlags(Kind kind,
InLoopFlag in_loop = NOT_IN_LOOP,
InlineCacheState ic_state = UNINITIALIZED,
PropertyType type = NORMAL,
int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeFlags(
Kind kind,
InLoopFlag in_loop = NOT_IN_LOOP,
InlineCacheState ic_state = UNINITIALIZED,
ExtraICState extra_ic_state = kNoExtraICState,
PropertyType type = NORMAL,
int argc = -1,
InlineCacheHolderFlag holder = OWN_MAP);
static inline Flags ComputeMonomorphicFlags(
Kind kind,
PropertyType type,
ExtraICState extra_ic_state = kNoExtraICState,
InlineCacheHolderFlag holder = OWN_MAP,
InLoopFlag in_loop = NOT_IN_LOOP,
int argc = -1);
static inline Kind ExtractKindFromFlags(Flags flags);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
static inline PropertyType ExtractTypeFromFlags(Flags flags);
static inline int ExtractArgumentsCountFromFlags(Flags flags);
@ -3418,14 +3440,16 @@ class Code: public HeapObject {
static const int kFlagsTypeShift = 4;
static const int kFlagsKindShift = 7;
static const int kFlagsICHolderShift = 11;
static const int kFlagsArgumentsCountShift = 12;
static const int kFlagsExtraICStateShift = 12;
static const int kFlagsArgumentsCountShift = 14;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
static const int kFlagsTypeMask = 0x00000070; // 00001110000
static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
static const int kFlagsArgumentsCountMask = 0xFFFFF000;
static const int kFlagsExtraICStateMask = 0x00003000;
static const int kFlagsArgumentsCountMask = 0xFFFFC000;
static const int kFlagsNotUsedInLookup =
(kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
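The two new constants carve bits 12-13 out of what used to be the argument-count field, so extra IC state rides along in every flags word. A minimal encode/decode sketch using exactly these shifts and masks (Pack is a simplified stand-in for ComputeFlags):

#include <cassert>

static const int kFlagsExtraICStateShift = 12;
static const int kFlagsArgumentsCountShift = 14;
static const int kFlagsExtraICStateMask = 0x00003000;        // bits 12-13
static const unsigned kFlagsArgumentsCountMask = 0xFFFFC000; // bits 14 and up

static unsigned Pack(int extra_ic_state, int argc) {
  return (static_cast<unsigned>(extra_ic_state) << kFlagsExtraICStateShift) |
         (static_cast<unsigned>(argc) << kFlagsArgumentsCountShift);
}

static int ExtractExtraICState(unsigned flags) {
  return static_cast<int>((flags & kFlagsExtraICStateMask) >>
                          kFlagsExtraICStateShift);
}

static int ExtractArgumentsCount(unsigned flags) {
  return static_cast<int>((flags & kFlagsArgumentsCountMask) >>
                          kFlagsArgumentsCountShift);
}

int main() {
  unsigned flags = Pack(/* STRING_INDEX_OUT_OF_BOUNDS */ 1, /* argc */ 2);
  assert(ExtractExtraICState(flags) == 1);
  assert(ExtractArgumentsCount(flags) == 2);
  return 0;
}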

2
deps/v8/src/platform-win32.cc

@ -1474,7 +1474,7 @@ Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
void Thread::set_name(const char* name) {
strncpy_s(name_, sizeof(name_), name, strlen(name));
OS::StrNCpy(Vector<char>(name_, sizeof(name_)), name, strlen(name));
name_[sizeof(name_) - 1] = '\0';
}

2
deps/v8/src/preparse-data.h

@ -39,7 +39,7 @@ class PreparseDataConstants : public AllStatic {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
static const unsigned kCurrentVersion = 5;
static const unsigned kCurrentVersion = 6;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;

1
deps/v8/src/runtime-profiler.cc

@ -134,6 +134,7 @@ void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
static bool IsOptimizable(JSFunction* function) {
if (Heap::InNewSpace(function)) return false;
Code* code = function->code();
return code->kind() == Code::FUNCTION && code->optimizable();
}

112
deps/v8/src/runtime.cc

@ -10654,51 +10654,12 @@ static MaybeObject* Runtime_Abort(Arguments args) {
}
MUST_USE_RESULT static MaybeObject* CacheMiss(FixedArray* cache_obj,
int index,
Object* key_obj) {
ASSERT(index % 2 == 0); // index of the key
ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
ASSERT(index < cache_obj->length());
HandleScope scope;
Handle<FixedArray> cache(cache_obj);
Handle<Object> key(key_obj);
Handle<JSFunction> factory(JSFunction::cast(
cache->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
Handle<Object> receiver(Top::global_context()->global());
Handle<Object> value;
{
// This handle is neither shared nor used later, so it's safe.
Object** argv[] = { key.location() };
bool pending_exception = false;
value = Execution::Call(factory,
receiver,
1,
argv,
&pending_exception);
if (pending_exception) return Failure::Exception();
}
cache->set(index, *key);
cache->set(index + 1, *value);
cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(index));
return *value;
}
static MaybeObject* Runtime_GetFromCache(Arguments args) {
// This is only called from codegen, so checks might be more lax.
CONVERT_CHECKED(FixedArray, cache, args[0]);
CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
Object* key = args[1];
const int finger_index =
Smi::cast(cache->get(JSFunctionResultCache::kFingerIndex))->value();
int finger_index = cache->finger_index();
Object* o = cache->get(finger_index);
if (o == key) {
// The fastest case: hit the same place again.
@ -10710,35 +10671,78 @@ static MaybeObject* Runtime_GetFromCache(Arguments args) {
i -= 2) {
o = cache->get(i);
if (o == key) {
cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
cache->set_finger_index(i);
return cache->get(i + 1);
}
}
const int size =
Smi::cast(cache->get(JSFunctionResultCache::kCacheSizeIndex))->value();
int size = cache->size();
ASSERT(size <= cache->length());
for (int i = size - 2; i > finger_index; i -= 2) {
o = cache->get(i);
if (o == key) {
cache->set(JSFunctionResultCache::kFingerIndex, Smi::FromInt(i));
cache->set_finger_index(i);
return cache->get(i + 1);
}
}
// Cache miss. If we have spare room, put new data into it, otherwise
// evict post finger entry which must be least recently used.
if (size < cache->length()) {
cache->set(JSFunctionResultCache::kCacheSizeIndex, Smi::FromInt(size + 2));
return CacheMiss(cache, size, key);
// There is no value in the cache. Invoke the function and cache the result.
HandleScope scope;
Handle<JSFunctionResultCache> cache_handle(cache);
Handle<Object> key_handle(key);
Handle<Object> value;
{
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
Handle<Object> receiver(Top::global_context()->global());
// This handle is neither shared nor used later, so it's safe.
Object** argv[] = { key_handle.location() };
bool pending_exception = false;
value = Execution::Call(factory,
receiver,
1,
argv,
&pending_exception);
if (pending_exception) return Failure::Exception();
}
#ifdef DEBUG
cache_handle->JSFunctionResultCacheVerify();
#endif
// Function invocation may have cleared the cache. Reread all the data.
finger_index = cache_handle->finger_index();
size = cache_handle->size();
// If we have spare room, put new data into it, otherwise evict post finger
// entry which is likely to be the least recently used.
int index = -1;
if (size < cache_handle->length()) {
cache_handle->set_size(size + JSFunctionResultCache::kEntrySize);
index = size;
} else {
int target_index = finger_index + JSFunctionResultCache::kEntrySize;
if (target_index == cache->length()) {
target_index = JSFunctionResultCache::kEntriesIndex;
index = finger_index + JSFunctionResultCache::kEntrySize;
if (index == cache_handle->length()) {
index = JSFunctionResultCache::kEntriesIndex;
}
return CacheMiss(cache, target_index, key);
}
ASSERT(index % 2 == 0);
ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
ASSERT(index < cache_handle->length());
cache_handle->set(index, *key_handle);
cache_handle->set(index + 1, *value);
cache_handle->set_finger_index(index);
#ifdef DEBUG
cache_handle->JSFunctionResultCacheVerify();
#endif
return *value;
}
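Runtime_GetFromCache now does the whole miss path inline: grow while there is spare room, otherwise overwrite the entry just past the finger, wrapping to the first entry. A toy model of that replacement policy over a flat array (the squaring factory stands in for the cached JS function):

#include <cassert>
#include <vector>

static const size_t kEntrySize = 2;  // each entry is a (key, value) pair

struct ResultCache {
  std::vector<int> slots;  // key, value, key, value, ...
  size_t size;             // slots in use
  size_t finger;           // index of the most recently hit key

  explicit ResultCache(size_t capacity)
      : slots(capacity, 0), size(0), finger(0) {}

  int GetOrCompute(int key) {
    for (size_t i = 0; i < size; i += kEntrySize) {
      if (slots[i] == key) {  // hit: move the finger and return
        finger = i;
        return slots[i + 1];
      }
    }
    int value = key * key;  // stand-in for invoking the factory function
    size_t index;
    if (size < slots.size()) {
      index = size;                 // spare room: append a new entry
      size += kEntrySize;
    } else {
      index = finger + kEntrySize;  // full: evict the post-finger entry
      if (index == slots.size()) index = 0;
    }
    slots[index] = key;
    slots[index + 1] = value;
    finger = index;
    return value;
  }
};

int main() {
  ResultCache cache(4);                // room for two entries
  assert(cache.GetOrCompute(3) == 9);
  assert(cache.GetOrCompute(3) == 9);  // second lookup hits via the finger
  cache.GetOrCompute(4);               // fills the cache
  cache.GetOrCompute(5);               // evicts the entry after the finger
  assert(cache.GetOrCompute(5) == 25);
  return 0;
}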
#ifdef DEBUG

5
deps/v8/src/scopeinfo.cc

@ -152,19 +152,18 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
//
// - function name
//
// - calls eval boolean flag
//
// - number of variables in the context object (smi) (= function context
// slot index + 1)
// - list of pairs (name, Var mode) of context-allocated variables (starting
// with context slot 0)
// - NULL (sentinel)
//
// - number of parameters (smi)
// - list of parameter names (starting with parameter 0 first)
// - NULL (sentinel)
//
// - number of variables on the stack (smi)
// - list of names of stack-allocated variables (starting with stack slot 0)
// - NULL (sentinel)
// The ScopeInfo representation could be simplified and the ScopeInfo
// re-implemented (with almost the same interface). Here is a

62
deps/v8/src/scopes.cc

@ -154,6 +154,24 @@ Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
if (scope_info->HasHeapAllocatedLocals()) {
num_heap_slots_ = scope_info_->NumberOfContextSlots();
}
// This scope's arguments shadow (if present) is context-allocated if an inner
// scope accesses this one's parameters. Allocate the arguments_shadow_
// variable if necessary.
Variable::Mode mode;
int arguments_shadow_index =
scope_info_->ContextSlotIndex(Heap::arguments_shadow_symbol(), &mode);
if (arguments_shadow_index >= 0) {
ASSERT(mode == Variable::INTERNAL);
arguments_shadow_ = new Variable(this,
Factory::arguments_shadow_symbol(),
Variable::INTERNAL,
true,
Variable::ARGUMENTS);
arguments_shadow_->set_rewrite(
new Slot(arguments_shadow_, Slot::CONTEXT, arguments_shadow_index));
arguments_shadow_->set_is_used(true);
}
}
@ -239,21 +257,49 @@ Variable* Scope::LocalLookup(Handle<String> name) {
// If the scope is resolved, we can find a variable in serialized scope info.
// We should never lookup 'arguments' in this scope
// as it is impllicitly present in any scope.
// as it is implicitly present in any scope.
ASSERT(*name != *Factory::arguments_symbol());
// Assert that there is no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
// Check context slot lookup.
Variable::Mode mode;
int index = scope_info_->ContextSlotIndex(*name, &mode);
if (index < 0) {
return NULL;
if (index >= 0) {
Variable* var =
variables_.Declare(this, name, mode, true, Variable::NORMAL);
var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
return var;
}
// Check that there is no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL);
var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
return var;
index = scope_info_->ParameterIndex(*name);
if (index >= 0) {
// ".arguments" must be present in context slots.
ASSERT(arguments_shadow_ != NULL);
Variable* var =
variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
Property* rewrite =
new Property(new VariableProxy(arguments_shadow_),
new Literal(Handle<Object>(Smi::FromInt(index))),
RelocInfo::kNoPosition,
Property::SYNTHETIC);
rewrite->set_is_arguments_access(true);
var->set_rewrite(rewrite);
return var;
}
index = scope_info_->FunctionContextSlotIndex(*name);
if (index >= 0) {
// Check that there is no local slot with the given name.
ASSERT(scope_info_->StackSlotIndex(*name) < 0);
Variable* var =
variables_.Declare(this, name, Variable::VAR, true, Variable::NORMAL);
var->set_rewrite(new Slot(var, Slot::CONTEXT, index));
return var;
}
return NULL;
}

171
deps/v8/src/stub-cache.cc

@ -29,6 +29,7 @@
#include "api.h"
#include "arguments.h"
#include "gdb-jit.h"
#include "ic-inl.h"
#include "stub-cache.h"
#include "vm-state-inl.h"
@ -122,6 +123,7 @@ MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
@ -146,6 +148,7 @@ MaybeObject* StubCache::ComputeLoadField(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -171,6 +174,7 @@ MaybeObject* StubCache::ComputeLoadCallback(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -196,6 +200,7 @@ MaybeObject* StubCache::ComputeLoadConstant(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -219,6 +224,7 @@ MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -252,6 +258,7 @@ MaybeObject* StubCache::ComputeLoadGlobal(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -276,6 +283,7 @@ MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -301,6 +309,7 @@ MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -325,6 +334,7 @@ MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -350,6 +360,7 @@ MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -373,6 +384,7 @@ MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -395,6 +407,7 @@ MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -416,6 +429,7 @@ MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -461,6 +475,7 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -509,6 +524,7 @@ MaybeObject* StubCache::ComputeStoreGlobal(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -532,6 +548,7 @@ MaybeObject* StubCache::ComputeStoreCallback(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -554,6 +571,7 @@ MaybeObject* StubCache::ComputeStoreInterceptor(String* name,
if (!maybe_code->ToObject(&code)) return maybe_code;
}
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -579,6 +597,7 @@ MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
}
PROFILE(CodeCreateEvent(
Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
receiver->UpdateMapCodeCache(name, Code::cast(code));
@ -594,6 +613,7 @@ MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
MaybeObject* StubCache::ComputeCallConstant(int argc,
InLoopFlag in_loop,
Code::Kind kind,
Code::ExtraICState extra_ic_state,
String* name,
Object* object,
JSObject* holder,
@ -613,12 +633,12 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
check = BOOLEAN_CHECK;
}
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
cache_holder,
in_loop,
argc);
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
CONSTANT_FUNCTION,
extra_ic_state,
cache_holder,
in_loop,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
@ -627,7 +647,8 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
CallStubCompiler compiler(
argc, in_loop, kind, extra_ic_state, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallConstant(object, holder, function, name, check);
if (!maybe_code->ToObject(&code)) return maybe_code;
@ -636,6 +657,7 @@ MaybeObject* StubCache::ComputeCallConstant(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@ -667,12 +689,14 @@ MaybeObject* StubCache::ComputeCallField(int argc,
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
FIELD,
Code::kNoExtraICState,
cache_holder,
in_loop,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
CallStubCompiler compiler(
argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallField(JSObject::cast(object),
holder,
@ -683,6 +707,7 @@ MaybeObject* StubCache::ComputeCallField(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@ -710,15 +735,16 @@ MaybeObject* StubCache::ComputeCallInterceptor(int argc,
object = holder;
}
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
cache_holder,
NOT_IN_LOOP,
argc);
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
INTERCEPTOR,
Code::kNoExtraICState,
cache_holder,
NOT_IN_LOOP,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
CallStubCompiler compiler(
argc, NOT_IN_LOOP, kind, Code::kNoExtraICState, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@ -726,6 +752,7 @@ MaybeObject* StubCache::ComputeCallInterceptor(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@ -760,12 +787,12 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(receiver, holder);
JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind,
NORMAL,
cache_holder,
in_loop,
argc);
Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
NORMAL,
Code::kNoExtraICState,
cache_holder,
in_loop,
argc);
Object* code = map_holder->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
@ -773,7 +800,8 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
CallStubCompiler compiler(
argc, in_loop, kind, Code::kNoExtraICState, cache_holder);
{ MaybeObject* maybe_code =
compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
@ -781,6 +809,7 @@ MaybeObject* StubCache::ComputeCallGlobal(int argc,
ASSERT_EQ(flags, Code::cast(code)->flags());
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
Object* result;
{ MaybeObject* maybe_result =
map_holder->UpdateMapCodeCache(name, Code::cast(code));
@ -839,8 +868,12 @@ static MaybeObject* FillCache(MaybeObject* maybe_code) {
Code* StubCache::FindCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
UNINITIALIZED,
Code::kNoExtraICState,
NORMAL,
argc);
Object* result = ProbeCache(flags)->ToObjectUnchecked();
ASSERT(!result->IsUndefined());
// This might be called during the marking phase of the collector
@ -852,8 +885,12 @@ Code* StubCache::FindCallInitialize(int argc,
MaybeObject* StubCache::ComputeCallInitialize(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind, in_loop, UNINITIALIZED, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
UNINITIALIZED,
Code::kNoExtraICState,
NORMAL,
argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -895,8 +932,12 @@ Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc,
MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind, in_loop, PREMONOMORPHIC, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
PREMONOMORPHIC,
Code::kNoExtraICState,
NORMAL,
argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -910,8 +951,12 @@ MaybeObject* StubCache::ComputeCallPreMonomorphic(int argc,
MaybeObject* StubCache::ComputeCallNormal(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind, in_loop, MONOMORPHIC, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
MONOMORPHIC,
Code::kNoExtraICState,
NORMAL,
argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -925,8 +970,12 @@ MaybeObject* StubCache::ComputeCallNormal(int argc,
MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
InLoopFlag in_loop,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind, in_loop, MEGAMORPHIC, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
in_loop,
MEGAMORPHIC,
Code::kNoExtraICState,
NORMAL,
argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -940,8 +989,13 @@ MaybeObject* StubCache::ComputeCallMegamorphic(int argc,
MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
// MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
// and monomorphic stubs are not mixed up together in the stub cache.
Code::Flags flags = Code::ComputeFlags(
kind, NOT_IN_LOOP, MONOMORPHIC_PROTOTYPE_FAILURE, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC_PROTOTYPE_FAILURE,
Code::kNoExtraICState,
NORMAL,
argc,
OWN_MAP);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -954,8 +1008,12 @@ MaybeObject* StubCache::ComputeCallMiss(int argc, Code::Kind kind) {
#ifdef ENABLE_DEBUGGER_SUPPORT
MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind, NOT_IN_LOOP, DEBUG_BREAK, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_BREAK,
Code::kNoExtraICState,
NORMAL,
argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -968,12 +1026,12 @@ MaybeObject* StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) {
MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(int argc,
Code::Kind kind) {
Code::Flags flags =
Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_PREPARE_STEP_IN,
NORMAL,
argc);
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
DEBUG_PREPARE_STEP_IN,
Code::kNoExtraICState,
NORMAL,
argc);
Object* probe;
{ MaybeObject* maybe_probe = ProbeCache(flags);
if (!maybe_probe->ToObject(&probe)) return maybe_probe;
@ -1257,6 +1315,7 @@ MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
return result;
}
@ -1282,6 +1341,7 @@ MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
return result;
}
@ -1304,6 +1364,7 @@ MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
return result;
}
@ -1328,6 +1389,7 @@ MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
return result;
}
@ -1350,6 +1412,7 @@ MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
USE(code);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
code, code->arguments_count()));
GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
return result;
}
@ -1449,6 +1512,9 @@ MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
name,
Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@ -1461,6 +1527,9 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
name,
Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@ -1473,6 +1542,9 @@ MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC,
name,
Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@ -1485,6 +1557,9 @@ MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
Code::cast(result->ToObjectUnchecked()),
name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
name,
Code::cast(result->ToObjectUnchecked())));
}
return result;
}
@ -1493,11 +1568,13 @@ MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
CallStubCompiler::CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
Code::ExtraICState extra_ic_state,
InlineCacheHolderFlag cache_holder)
: arguments_(argc)
, in_loop_(in_loop)
, kind_(kind)
, cache_holder_(cache_holder) {
: arguments_(argc),
in_loop_(in_loop),
kind_(kind),
extra_ic_state_(extra_ic_state),
cache_holder_(cache_holder) {
}
@ -1534,6 +1611,7 @@ MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
type,
extra_ic_state_,
cache_holder_,
in_loop_,
argc);
@ -1559,6 +1637,7 @@ MaybeObject* ConstructStubCompiler::GetCode() {
Code* code = Code::cast(result);
USE(code);
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
return result;
}
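Every code-creation site above gains a GDBJIT(AddCode(...)) next to its PROFILE(...). The macro itself lives in gdb-jit.h; the sketch below assumes, by analogy with how it is invoked here, that it forwards to GDBJITInterface when ENABLE_GDB_JIT_INTERFACE is defined and compiles to nothing otherwise:

#include <cstdio>

// Hypothetical reconstruction of the compile-away wrapper; the real
// definition is in deps/v8/src/gdb-jit.h.
#ifdef ENABLE_GDB_JIT_INTERFACE
struct GDBJITInterface {
  static void AddCode(const char* name) {
    std::printf("gdbjit: registered %s\n", name);
  }
};
#define GDBJIT(action) GDBJITInterface::action
#else
#define GDBJIT(action) ((void) 0)  // disabled builds pay no cost at all
#endif

int main() {
  // Expands to a real call or to a no-op, depending on the build flag.
  GDBJIT(AddCode("LoadIC"));
  return 0;
}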

18
deps/v8/src/stub-cache.h

@ -177,13 +177,15 @@ class StubCache : public AllStatic {
JSObject* holder,
int index);
MUST_USE_RESULT static MaybeObject* ComputeCallConstant(int argc,
InLoopFlag in_loop,
Code::Kind,
String* name,
Object* object,
JSObject* holder,
JSFunction* function);
MUST_USE_RESULT static MaybeObject* ComputeCallConstant(
int argc,
InLoopFlag in_loop,
Code::Kind,
Code::ExtraICState extra_ic_state,
String* name,
Object* object,
JSObject* holder,
JSFunction* function);
MUST_USE_RESULT static MaybeObject* ComputeCallNormal(int argc,
InLoopFlag in_loop,
@ -660,6 +662,7 @@ class CallStubCompiler: public StubCompiler {
CallStubCompiler(int argc,
InLoopFlag in_loop,
Code::Kind kind,
Code::ExtraICState extra_ic_state,
InlineCacheHolderFlag cache_holder);
MUST_USE_RESULT MaybeObject* CompileCallField(JSObject* object,
@ -705,6 +708,7 @@ class CallStubCompiler: public StubCompiler {
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const Code::Kind kind_;
const Code::ExtraICState extra_ic_state_;
const InlineCacheHolderFlag cache_holder_;
const ParameterCount& arguments() { return arguments_; }

29
deps/v8/src/third_party/strongtalk/LICENSE

@ -0,0 +1,29 @@
Copyright (c) 1994-2006 Sun Microsystems Inc.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Sun Microsystems or the names of contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

18
deps/v8/src/third_party/strongtalk/README.chromium

@ -0,0 +1,18 @@
Name: Strongtalk
URL: http://www.strongtalk.org/
Code from the Strongtalk assembler is used with modification in the following
files:
src/assembler.h
src/assembler.cc
src/arm/assembler-arm.cc
src/arm/assembler-arm.h
src/arm/assembler-arm-inl.h
src/ia32/assembler-ia32.cc
src/ia32/assembler-ia32.h
src/ia32/assembler-ia32-inl.h
src/mips/assembler-mips.cc
src/mips/assembler-mips.h
src/mips/assembler-mips-inl.h
src/x64/assembler-x64.h

15
deps/v8/src/type-info.cc

@ -58,6 +58,9 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) {
}
STATIC_ASSERT(DEFAULT_STRING_STUB == Code::kNoExtraICState);
TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
Handle<Context> global_context) {
global_context_ = global_context;
@ -117,8 +120,16 @@ ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
Handle<String> name) {
int arity = expr->arguments()->length();
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::CALL_IC, NORMAL, OWN_MAP, NOT_IN_LOOP, arity);
// Note: these flags won't let us get maps from stubs with
// non-default extra ic state in the megamorphic case. In the more
// important monomorphic case the map is obtained directly, so it's
// not a problem until we decide to emit more polymorphic code.
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
NORMAL,
Code::kNoExtraICState,
OWN_MAP,
NOT_IN_LOOP,
arity);
return CollectReceiverTypes(expr->position(), name, flags);
}

6
deps/v8/src/type-info.h

@ -219,6 +219,12 @@ class TypeInfo {
};
enum StringStubFeedback {
DEFAULT_STRING_STUB = 0,
STRING_INDEX_OUT_OF_BOUNDS = 1
};
// Forward declarations.
class Assignment;
class BinaryOperation;
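
The STATIC_ASSERT in type-info.cc pins DEFAULT_STRING_STUB to Code::kNoExtraICState, so a call IC compiled with no feedback and one compiled with the default string-stub feedback produce identical flags. A standalone restatement of the invariant (using C++11 static_assert in place of the STATIC_ASSERT macro; the kNoExtraICState constant is a stand-in):

enum StringStubFeedback {
  DEFAULT_STRING_STUB = 0,
  STRING_INDEX_OUT_OF_BOUNDS = 1
};

const int kNoExtraICState = 0;  // stand-in for Code::kNoExtraICState

// If this ever failed, default-feedback stubs would stop sharing
// stub-cache entries with feedback-free stubs.
static_assert(DEFAULT_STRING_STUB == kNoExtraICState,
              "default string stub must carry no extra IC state");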

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
#define BUILD_NUMBER 8
#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

6
deps/v8/src/x64/assembler-x64.cc

@ -2950,6 +2950,12 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
}
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
emit(data);
}
void Assembler::dd(uint32_t data) {
EnsureSpace ensure_space(this);
emitl(data);

2
deps/v8/src/x64/assembler-x64.h

@ -1261,7 +1261,7 @@ class Assembler : public Malloced {
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data) { UNIMPLEMENTED(); }
void db(uint8_t data);
void dd(uint32_t data);
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }

1
deps/v8/src/x64/code-stubs-x64.h

@ -198,6 +198,7 @@ class GenericBinaryOpStub: public CodeStub {
}
friend class CodeGenerator;
friend class LCodeGen;
};

6
deps/v8/src/x64/deoptimizer-x64.cc

@ -41,7 +41,8 @@ namespace internal {
int Deoptimizer::table_entry_size_ = 10;
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
UNIMPLEMENTED();
// UNIMPLEMENTED, for now just return.
return;
}
@ -68,7 +69,8 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::EntryGenerator::Generate() {
UNIMPLEMENTED();
// UNIMPLEMENTED, for now just return.
return;
}

2
deps/v8/src/x64/frames-x64.h

@ -45,7 +45,7 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
// Number of registers for which space is reserved in safepoints.
// TODO(x64): This should not be 0.
static const int kNumSafepointRegisters = 0;
static const int kNumSafepointRegisters = 8;
// ----------------------------------------------------

8
deps/v8/src/x64/ic-x64.cc

@ -1178,8 +1178,12 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
Label number, non_number, non_string, boolean, probe, miss;
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
Code::Flags flags = Code::ComputeFlags(kind,
NOT_IN_LOOP,
MONOMORPHIC,
Code::kNoExtraICState,
NORMAL,
argc);
StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
// If the stub cache probing failed, the receiver might be a value.

144
deps/v8/src/x64/lithium-codegen-x64.cc

@ -155,13 +155,13 @@ bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
AddResultMove(move.source(), move.destination());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
LGapNode* from = LookupNode(move.source());
LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
@ -338,8 +338,9 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
Abort("Unimplemented: %s", "GeneratePrologue");
return false;
ASSERT(is_done());
safepoints_.Emit(masm(), StackSlotCount());
return !is_aborted();
}
@ -492,7 +493,24 @@ void LCodeGen::AddToTranslation(Translation* translation,
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
Abort("Unimplemented: %s", "CallCode");
if (instr != NULL) {
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
__ call(code, mode);
RegisterLazyDeoptimization(instr);
} else {
LPointerMap no_pointers(0);
RecordPosition(no_pointers.position());
__ call(code, mode);
RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
}
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
}
@ -521,7 +539,30 @@ void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
Abort("Unimplemented: %s", "RegisterEnvironmentForDeoptimization");
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
// [incoming arguments] [spill slots] [pushed outgoing arguments]
// Layout of the environment:
// 0 ..................................................... size-1
// [parameters] [locals] [expression stack including arguments]
// Layout of the translation:
// 0 ........................................................ size - 1 + 4
// [expression stack including arguments] [locals] [4 words] [parameters]
// |>------------ translation_size ------------<|
int frame_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
}
Translation translation(&translations_, frame_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
environment->Register(deoptimization_index, translation.index());
deoptimizations_.Add(environment);
}
}
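
The layout comment above describes a reordering: the environment stores parameters first, while the translation records the expression stack first and the parameters last, separated by four bookkeeping words. A small sketch of that mapping (the "<frame word>" placeholder is illustrative, not V8's actual bookkeeping content):

#include <string>
#include <vector>

std::vector<std::string> LayoutTranslation(
    const std::vector<std::string>& params,
    const std::vector<std::string>& locals,
    const std::vector<std::string>& expr_stack) {
  // Translation order: [expression stack] [locals] [4 words] [parameters].
  std::vector<std::string> t(expr_stack.begin(), expr_stack.end());
  t.insert(t.end(), locals.begin(), locals.end());
  t.insert(t.end(), 4, "<frame word>");  // the "[4 words]" in the comment
  t.insert(t.end(), params.begin(), params.end());
  return t;
}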
@ -651,8 +692,8 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
LOperand* to = move.to();
LOperand* from = move.source();
LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(xmm_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
@ -784,12 +825,31 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
Abort("Unimplemented: %s", "DoConstantI");
ASSERT(instr->result()->IsRegister());
__ movl(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoConstantD(LConstantD* instr) {
Abort("Unimplemented: %s", "DoConstantI");
ASSERT(instr->result()->IsDoubleRegister());
XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
// Use xor to produce +0.0 in a fast and compact way, but avoid doing
// so if the constant is -0.0, since xorpd would lose the sign bit.
if (BitCast<uint64_t, double>(v) == 0) {
__ xorpd(res, res);
} else {
Register tmp = ToRegister(instr->TempAt(0));
int32_t v_int32 = static_cast<int32_t>(v);
if (static_cast<double>(v_int32) == v) {
__ movl(tmp, Immediate(v_int32));
__ cvtlsi2sd(res, tmp);
} else {
uint64_t int_val = BitCast<uint64_t, double>(v);
__ Set(tmp, int_val);
__ movd(res, tmp);
}
}
}
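
The bit-pattern comparison above is deliberate: under IEEE 754, -0.0 == 0.0 is true, so a value comparison could not keep the xorpd path (which always yields +0.0) away from the -0.0 case. A quick standalone check:

#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t BitPattern(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);  // well-defined type pun
  return bits;
}

int main() {
  std::printf("%d\n", 0.0 == -0.0);  // prints 1: the values compare equal
  std::printf("%016llx\n", (unsigned long long)BitPattern(0.0));   // 0000000000000000
  std::printf("%016llx\n", (unsigned long long)BitPattern(-0.0));  // 8000000000000000
  return 0;
}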
@ -825,7 +885,22 @@ void LCodeGen::DoThrow(LThrow* instr) {
void LCodeGen::DoAddI(LAddI* instr) {
Abort("Unimplemented: %s", "DoAddI");
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
__ addl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
__ addl(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr->environment());
}
}
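
DoAddI relies on the CPU overflow flag set by addl and deoptimizes when it is set. A portable analogue of the condition that DeoptimizeIf(overflow, ...) tests for:

#include <cstdint>

// Returns true exactly when a 32-bit signed add would overflow, i.e. the
// case in which the generated code bails out to the deoptimizer.
static bool AddWouldOverflow(int32_t a, int32_t b, int32_t* result) {
  int64_t wide = static_cast<int64_t>(a) + b;
  *result = static_cast<int32_t>(wide);
  return wide != *result;
}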
@ -835,7 +910,13 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
Abort("Unimplemented: %s", "DoArithmeticT");
ASSERT(ToRegister(instr->InputAt(0)).is(rdx));
ASSERT(ToRegister(instr->InputAt(1)).is(rax));
ASSERT(ToRegister(instr->result()).is(rax));
GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS);
stub.SetArgsInRegisters();
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@ -859,7 +940,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
Abort("Unimplemented: %s", "EmitGoto");
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
// Perform stack overflow check if this goto needs it before jumping.
if (deferred_stack_check != NULL) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, chunk_->GetAssemblyLabel(block));
__ jmp(deferred_stack_check->entry());
deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
} else {
__ jmp(chunk_->GetAssemblyLabel(block));
}
}
}
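
EmitGoto only emits the stack check when the goto carries a deferred check. The control flow, restated in plain C++ (function pointers stand in for the jump targets):

#include <cstdint>

void EmitGotoAnalogue(uintptr_t sp, uintptr_t stack_limit,
                      void (*target_block)(), void (*deferred_check)()) {
  if (sp >= stack_limit) {
    target_block();     // __ j(above_equal, target label)
  } else {
    deferred_check();   // __ jmp(deferred->entry())
    target_block();     // deferred exit is bound to the target label
  }
}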
@ -979,27 +1072,6 @@ void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
}
InstanceType LHasInstanceType::TestType() {
InstanceType from = hydrogen()->from();
InstanceType to = hydrogen()->to();
if (from == FIRST_TYPE) return to;
ASSERT(from == to || to == LAST_TYPE);
return from;
}
Condition LHasInstanceType::BranchCondition() {
InstanceType from = hydrogen()->from();
InstanceType to = hydrogen()->to();
if (from == to) return equal;
if (to == LAST_TYPE) return above_equal;
if (from == FIRST_TYPE) return below_equal;
UNREACHABLE();
return equal;
}
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
Abort("Unimplemented: %s", "DoHasInstanceType");
}

110
deps/v8/src/x64/lithium-x64.cc

@ -90,18 +90,22 @@ void LInstruction::PrintTo(StringStream* stream) {
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
for (int i = 0; i < I; i++) {
stream->Add(i == 0 ? "= " : " ");
inputs_.at(i)->PrintTo(stream);
}
stream->Add("= ");
inputs_.PrintOperandsTo(stream);
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
if (this->HasResult()) {
this->result()->PrintTo(stream);
stream->Add(" ");
results_.PrintOperandsTo(stream);
}
template<typename T, int N>
void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
for (int i = 0; i < N; i++) {
if (i > 0) stream->Add(" ");
elems_[i]->PrintTo(stream);
}
}
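
The refactor above replaces two hand-written printing loops with one OperandContainer::PrintOperandsTo shared by inputs and results. A self-contained sketch of the pattern (OperandLike is an invented stand-in for LOperand):

#include <cstdio>

struct OperandLike {
  int index;
  void PrintTo(FILE* out) { std::fprintf(out, "v%d", index); }
};

// One fixed-size container type, templated on element type and count,
// with the printing loop written once instead of duplicated per user.
template <typename T, int N>
struct OperandContainer {
  T* elems_[N];
  void PrintOperandsTo(FILE* out) {
    for (int i = 0; i < N; i++) {
      if (i > 0) std::fputs(" ", out);
      elems_[i]->PrintTo(out);
    }
  }
};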
@ -172,22 +176,22 @@ void LGoto::PrintDataTo(StringStream* stream) {
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
left()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
right()->PrintTo(stream);
InputAt(1)->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
@ -195,35 +199,35 @@ void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
@ -232,14 +236,14 @@ void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
void LTypeofIs::PrintDataTo(StringStream* stream) {
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
@ -253,7 +257,7 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
}
@ -263,7 +267,7 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) {
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[rcx] #%d / ", arity());
stream->Add("[ecx] #%d / ", arity());
}
@ -286,14 +290,14 @@ void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
input()->PrintTo(stream);
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
@ -571,6 +575,13 @@ LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value, new LUnallocated(LUnallocated::ANY));
}
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
if (value->EmitAtUses()) {
HInstruction* instr = HInstruction::cast(value);
@ -743,8 +754,19 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr) {
Abort("Unimplemented: %s", "DoArithmeticT");
return NULL;
ASSERT(op == Token::ADD ||
op == Token::DIV ||
op == Token::MOD ||
op == Token::MUL ||
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, rdx);
LOperand* right_operand = UseFixed(right, rax);
LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@ -825,8 +847,17 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
if (current->IsBranch()) {
instr->set_hydrogen_value(HBranch::cast(current)->value());
if (current->IsBranch() && !instr->IsGoto()) {
// TODO(fschneider): Handle branch instructions uniformly like
// other instructions. This requires us to generate the right
// branch instruction already at the HIR level.
ASSERT(instr->IsControl());
HBranch* branch = HBranch::cast(current);
instr->set_hydrogen_value(branch->value());
HBasicBlock* first = branch->FirstSuccessor();
HBasicBlock* second = branch->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
@ -863,11 +894,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
} else if (value->IsPushArgument()) {
op = new LArgument(argument_index++);
} else {
op = UseOrConstant(value);
if (op->IsUnallocated()) {
LUnallocated* unalloc = LUnallocated::cast(op);
unalloc->set_policy(LUnallocated::ANY);
}
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
@ -1069,7 +1096,23 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
Abort("Unimplemented: %s", "DoAdd");
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LAddI* add = new LAddI(left, right);
LInstruction* result = DefineSameAsFirst(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
Abort("Unimplemented: %s", "DoAdd on Doubles");
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
return NULL;
}
@ -1214,7 +1257,8 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
return DefineAsRegister(new LConstantI(value));
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
return DefineAsRegister(new LConstantD(value));
LOperand* temp = TempRegister();
return DefineAsRegister(new LConstantD(value, temp));
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
} else {

1075
deps/v8/src/x64/lithium-x64.h

File diff suppressed because it is too large

63
deps/v8/src/x64/stub-cache-x64.cc

@ -1327,8 +1327,8 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
MaybeObject* CallStubCompiler::GenerateMissBranch() {
MaybeObject* maybe_obj =
StubCache::ComputeCallMiss(arguments().immediate(), kind_);
MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
kind_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
__ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
@ -1660,9 +1660,15 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
GenerateNameCheck(name, &miss);
if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
index_out_of_range_label = &miss;
}
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@ -1690,7 +1696,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@ -1698,11 +1704,16 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
StubRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kNanValueRootIndex);
__ ret((argc + 1) * kPointerSize);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kNanValueRootIndex);
__ ret((argc + 1) * kPointerSize);
}
__ bind(&miss);
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@ -1733,9 +1744,15 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
const int argc = arguments().immediate();
Label miss;
Label name_miss;
Label index_out_of_range;
Label* index_out_of_range_label = &index_out_of_range;
GenerateNameCheck(name, &miss);
if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
index_out_of_range_label = &miss;
}
GenerateNameCheck(name, &name_miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
@ -1765,7 +1782,7 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
index_out_of_range_label,
STRING_INDEX_IS_NUMBER);
char_at_generator.GenerateFast(masm());
__ ret((argc + 1) * kPointerSize);
@ -1773,11 +1790,16 @@ MaybeObject* CallStubCompiler::CompileStringCharAtCall(
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kEmptyStringRootIndex);
__ ret((argc + 1) * kPointerSize);
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kEmptyStringRootIndex);
__ ret((argc + 1) * kPointerSize);
}
__ bind(&miss);
// Restore function name in rcx.
__ Move(rcx, Handle<String>(name));
__ bind(&name_miss);
Object* obj;
{ MaybeObject* maybe_obj = GenerateMissBranch();
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
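
Both string stubs above reuse the same trick: when compiled with the default extra IC state, the out-of-range index path is redirected to the miss label so the IC can be recompiled with STRING_INDEX_OUT_OF_BOUNDS feedback; only a stub compiled with that feedback binds the inline out-of-range handler. A plain C++ analogue of the control flow (names are illustrative):

#include <limits>
#include <string>

double CharCodeAtStub(const std::string& receiver, int index,
                      bool default_string_stub, bool* miss) {
  if (index < 0 || index >= static_cast<int>(receiver.size())) {
    if (default_string_stub) {
      *miss = true;  // equivalent of index_out_of_range_label = &miss
      return 0.0;
    }
    // Equivalent of the inline out-of-range path loading the NaN root.
    return std::numeric_limits<double>::quiet_NaN();
  }
  return static_cast<unsigned char>(receiver[index]);
}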
@ -2262,17 +2284,24 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
}
// Setup the context (function already in edi).
// Setup the context (function already in rdi).
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
__ IncrementCounter(&Counters::call_global_inline, 1);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
if (V8::UseCrankshaft()) {
// TODO(kasperl): For now, we always call indirectly through the
// code field in the function to allow recompilation to take effect
// without changing any of the call sites.
__ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION);
} else {
Handle<Code> code(function->code());
__ InvokeCode(code, expected, arguments(),
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
}
// Handle call cache miss.
__ bind(&miss);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
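
Under Crankshaft the call site no longer embeds the code object; it reloads the code entry from the function on every call, so recompilation only has to store a new entry point. A minimal sketch of that indirection (JSFunctionLike is an invented stand-in for the object whose kCodeEntryOffset field is loaded into rdx above):

struct JSFunctionLike {
  void (*code_entry)();
};

inline void CallThroughCodeEntry(JSFunctionLike* function) {
  // Indirect call: swapping function->code_entry retargets every call
  // site at once, without patching any of them.
  function->code_entry();
}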

10
deps/v8/test/cctest/cctest.status

@ -77,6 +77,12 @@ test-deoptimization/DeoptimizeCompare: FAIL
# Tests that time out with crankshaft.
test-api/Threading: SKIP
# BUG(1049): Currently no deoptimization support.
test-serialize/ContextSerialization: SKIP
test-serialize/ContextDeserialization: SKIP
test-debug/BreakPointReturn: SKIP
test-debug/DebugStepLinearMixedICs: SKIP
##############################################################################
[ $arch == arm ]
@ -102,10 +108,6 @@ test-sockets/Socket: SKIP
##############################################################################
[ $arch == arm && $crankshaft ]
# Tests that can fail with crankshaft.
test-deoptimization/DeoptimizeBinaryOperationMOD: PASS || FAIL
test-deoptimization/DeoptimizeBinaryOperationDIV: PASS || FAIL
# Tests that time out with crankshaft.
test-debug/ThreadedDebugging: SKIP
test-debug/DebugBreakLoop: SKIP

61
deps/v8/test/mjsunit/compiler/regress-serialized-slots.js

@ -0,0 +1,61 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The test verifies that parameters of the outer function are correctly
// accessible from the inner closure.
function runner(f, expected) {
for (var i = 0; i < 10000; i++) { // Loop to trigger optimization.
assertEquals(expected, f.call(this, 10));
}
}
Function.prototype.bind = function(thisObject)
{
var func = this;
var args = Array.prototype.slice.call(arguments, 1);
function bound()
{
// Note outer function parameter access (|thisObject|).
return func.apply(
thisObject,
args.concat(Array.prototype.slice.call(arguments, 0)));
}
return bound;
}
function sum(x, y) {
return x + y;
}
function test(n) {
runner(sum.bind(this, n), n + 10);
}
test(1);
test(42);
test(239);

3
deps/v8/test/mjsunit/mjsunit.status

@ -119,6 +119,9 @@ compiler/simple-osr: FAIL
# BUG (1026) This test is currently flaky.
compiler/simple-osr: SKIP
# BUG(1049): Currently no deoptimization support.
debug-liveedit-newsource: SKIP
debug-liveedit-1: SKIP
##############################################################################
[ $arch == mips ]

13
deps/v8/test/mozilla/mozilla.status

@ -836,19 +836,6 @@ js1_5/extensions/regress-371636: SKIP
# BUG(1040): Allow this test to timeout.
js1_5/GC/regress-203278-2: PASS || TIMEOUT
[ $arch == arm && $crankshaft ]
# Test that only fail with crankshaft.
js1_5/Regress/regress-416628: CRASH
js1_5/Regress/regress-96128-n: PASS || CRASH
# BUG(1032): test crashes.
ecma/Date/15.9.3.1-1: PASS || CRASH
ecma/Date/15.9.3.1-2: PASS || CRASH
ecma/Date/15.9.3.1-3: PASS || CRASH
ecma/Date/15.9.3.1-4: PASS || CRASH
ecma/Date/15.9.3.1-5: PASS || CRASH
[ $fast == yes && $arch == arm ]

6
deps/v8/tools/gyp/v8.gyp

@ -581,10 +581,10 @@
'../../src/arm/full-codegen-arm.cc',
'../../src/arm/ic-arm.cc',
'../../src/arm/jump-target-arm.cc',
'../../src/arm/lithium-codegen-arm.cc',
'../../src/arm/lithium-codegen-arm.h',
'../../src/arm/lithium-arm.cc',
'../../src/arm/lithium-arm.h',
'../../src/arm/lithium-codegen-arm.cc',
'../../src/arm/lithium-codegen-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
'../../src/arm/regexp-macro-assembler-arm.cc',
@ -634,6 +634,8 @@
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.h',
'../../src/ia32/lithium-gap-resolver-ia32.cc',
'../../src/ia32/lithium-gap-resolver-ia32.h',
'../../src/ia32/lithium-ia32.cc',
'../../src/ia32/lithium-ia32.h',
'../../src/ia32/macro-assembler-ia32.cc',
