
Upgrade V8 to 2.2.6

v0.7.4-release
Ryan Dahl · 15 years ago · commit 3b75f5070d
47 changed files (lines changed in parentheses):

  1. deps/v8/ChangeLog (15)
  2. deps/v8/include/v8.h (2)
  3. deps/v8/src/api.cc (4)
  4. deps/v8/src/arm/assembler-arm.cc (8)
  5. deps/v8/src/arm/assembler-arm.h (20)
  6. deps/v8/src/arm/assembler-thumb2.h (2)
  7. deps/v8/src/arm/codegen-arm.cc (470)
  8. deps/v8/src/arm/codegen-arm.h (11)
  9. deps/v8/src/arm/debug-arm.cc (2)
  10. deps/v8/src/arm/full-codegen-arm.cc (15)
  11. deps/v8/src/arm/ic-arm.cc (171)
  12. deps/v8/src/arm/macro-assembler-arm.cc (4)
  13. deps/v8/src/arm/macro-assembler-arm.h (59)
  14. deps/v8/src/arm/simulator-arm.cc (6)
  15. deps/v8/src/arm/stub-cache-arm.cc (45)
  16. deps/v8/src/arm/virtual-frame-arm.cc (76)
  17. deps/v8/src/arm/virtual-frame-arm.h (19)
  18. deps/v8/src/array.js (84)
  19. deps/v8/src/builtins.cc (31)
  20. deps/v8/src/codegen.cc (2)
  21. deps/v8/src/codegen.h (3)
  22. deps/v8/src/debug.cc (7)
  23. deps/v8/src/debug.h (3)
  24. deps/v8/src/flag-definitions.h (1)
  25. deps/v8/src/ia32/codegen-ia32.cc (22)
  26. deps/v8/src/ia32/stub-cache-ia32.cc (16)
  27. deps/v8/src/jsregexp.cc (17)
  28. deps/v8/src/liveedit.cc (20)
  29. deps/v8/src/liveedit.h (2)
  30. deps/v8/src/mark-compact.cc (4)
  31. deps/v8/src/mips/assembler-mips.h (4)
  32. deps/v8/src/regexp-macro-assembler-tracer.cc (4)
  33. deps/v8/src/runtime.cc (19)
  34. deps/v8/src/version.cc (4)
  35. deps/v8/src/x64/codegen-x64.cc (370)
  36. deps/v8/src/x64/codegen-x64.h (8)
  37. deps/v8/test/cctest/test-api.cc (12)
  38. deps/v8/test/cctest/test-debug.cc (8)
  39. deps/v8/test/cctest/test-liveedit.cc (6)
  40. deps/v8/test/es5conform/es5conform.status (1)
  41. deps/v8/test/mjsunit/array-pop.js (11)
  42. deps/v8/test/mjsunit/array-push.js (10)
  43. deps/v8/test/mjsunit/binary-op-newspace.js (4)
  44. deps/v8/test/mjsunit/regexp.js (48)
  45. deps/v8/test/mjsunit/regress/regress-crbug-40931.js (45)
  46. deps/v8/test/mjsunit/search-string-multiple.js (62)
  47. deps/v8/tools/utils.py (2)

deps/v8/ChangeLog (15)

@ -1,3 +1,14 @@
2010-04-28: Version 2.2.6
Add "amd64" as recognized architecture in scons build script
(by Ryan Dahl <coldredlemur@gmail.com>).
Fix bug in String search and replace with very simple RegExps.
Fix bug in RegExp containing "\b^".
Performance improvements on all platforms.
2010-04-26: Version 2.2.5
Various performance improvements (especially for ARM and x64)
@ -5,8 +16,8 @@
Fixed bug in CPU profiling (http://crbug.com/42137)
Fixed a bug with the natives cache.
Fixed two bugs in the ARM code generator that can cause
Fixed two bugs in the ARM code generator that can cause
wrong calculations.
Fixed a bug that may cause a wrong result for shift operations.

deps/v8/include/v8.h (2)

@ -767,7 +767,7 @@ class V8EXPORT Value : public Data {
bool IsInt32() const;
/**
* Returns true if this value is a 32-bit signed integer.
* Returns true if this value is a 32-bit unsigned integer.
*/
bool IsUint32() const;
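The corrected comment matters for embedders checking numeric ranges. A minimal usage sketch of the distinction (hypothetical embedder code, not part of this diff):

    void DescribeRange(v8::Handle<v8::Value> value) {
      // IsInt32(): the value fits in a signed 32-bit integer.
      if (value->IsInt32()) { /* safe to use value->Int32Value() */ }
      // IsUint32(): the value fits in an unsigned 32-bit integer.
      if (value->IsUint32()) { /* safe to use value->Uint32Value() */ }
    }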

deps/v8/src/api.cc (4)

@ -4020,8 +4020,8 @@ void Debug::ProcessDebugMessages() {
}
Local<Context> Debug::GetDebugContext() {
i::EnterDebugger debugger;
return Utils::ToLocal(i::Debug::debug_context());
ENTER_V8;
return Utils::ToLocal(i::Debugger::GetDebugContext());
}
#endif // ENABLE_DEBUGGER_SUPPORT

deps/v8/src/arm/assembler-arm.cc (8)

@ -800,9 +800,10 @@ void Assembler::b(int branch_offset, Condition cond) {
ASSERT(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & Imm24Mask));
if (cond == al)
if (cond == al) {
// Dead code is a good location to emit the constant pool.
CheckConstPool(false, false);
}
}
@ -1784,6 +1785,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
void Assembler::BlockConstPoolFor(int instructions) {
BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
// Debugging.
void Assembler::RecordJSReturn() {
WriteRecordedPositions();

deps/v8/src/arm/assembler-arm.h (20)

@ -941,6 +941,10 @@ class Assembler : public Malloced {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
// Postpone the generation of the constant pool for the specified number of
// instructions.
void BlockConstPoolFor(int instructions);
// Debugging
// Mark address of the ExitJSFrame code.
@ -956,14 +960,7 @@ class Assembler : public Malloced {
int pc_offset() const { return pc_ - buffer_; }
int current_position() const { return current_position_; }
int current_statement_position() const { return current_position_; }
void StartBlockConstPool() {
const_pool_blocked_nesting_++;
}
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
}
int current_statement_position() const { return current_statement_position_; }
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
@ -1001,6 +998,13 @@ class Assembler : public Malloced {
if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
}
void StartBlockConstPool() {
const_pool_blocked_nesting_++;
}
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
}
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
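For reference, the BlockConstPoolScope mentioned above (and used throughout codegen-arm.cc below) pairs these two calls RAII-style. A sketch of its likely shape, reconstructed from the StartBlockConstPool/EndBlockConstPool members; the class body itself is not shown in this diff:

    class BlockConstPoolScope {
     public:
      explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
        assem_->StartBlockConstPool();  // forbid constant pool emission
      }
      ~BlockConstPoolScope() {
        assem_->EndBlockConstPool();    // re-allow emission on scope exit
      }
     private:
      Assembler* assem_;
      DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
    };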

deps/v8/src/arm/assembler-thumb2.h (2)

@ -898,7 +898,7 @@ class Assembler : public Malloced {
int pc_offset() const { return pc_ - buffer_; }
int current_position() const { return current_position_; }
int current_statement_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }

deps/v8/src/arm/codegen-arm.cc (470)

@ -351,17 +351,17 @@ void CodeGenerator::Generate(CompilationInfo* info) {
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
}
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
expected by the debugger. If the sp_delta above cannot be encoded in the
// add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delta above cannot be encoded in
// the add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
#endif
}
}
// Adjust for function-level loop nesting.
@ -570,9 +570,9 @@ void CodeGenerator::Load(Expression* expr) {
void CodeGenerator::LoadGlobal() {
VirtualFrame::SpilledScope spilled_scope(frame_);
__ ldr(r0, GlobalObject());
frame_->EmitPush(r0);
Register reg = frame_->GetTOSRegister();
__ ldr(reg, GlobalObject());
frame_->EmitPush(reg);
}
@ -619,7 +619,7 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
__ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
frame_->Adjust(3);
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
__ Push(r2, r1, r0);
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
}
@ -687,7 +687,6 @@ Reference::~Reference() {
void CodeGenerator::LoadReference(Reference* ref) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
@ -696,11 +695,11 @@ void CodeGenerator::LoadReference(Reference* ref) {
if (property != NULL) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
LoadAndSpill(property->obj());
Load(property->obj());
if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
LoadAndSpill(property->key());
Load(property->key());
ref->set_type(Reference::KEYED);
}
} else if (var != NULL) {
@ -715,6 +714,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
}
} else {
// Anything else is a runtime error.
VirtualFrame::SpilledScope spilled_scope(frame_);
LoadAndSpill(e);
frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
@ -1527,6 +1527,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
LoadAndSpill(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
__ mov(r2, Operand(name));
__ ldr(r0, MemOperand(sp, 0));
frame_->CallLoadIC(RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
@ -2948,9 +2949,10 @@ void CodeGenerator::VisitConditional(Conditional* node) {
void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
if (slot->type() == Slot::LOOKUP) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(slot->var()->is_dynamic());
// JumpTargets do not yet support merging frames so the frame must be
// spilled when jumping to these targets.
JumpTarget slow;
JumpTarget done;
@ -2960,16 +2962,18 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
// perform a runtime call for all variables in the scope
// containing the eval.
if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
// If there was no control flow to slow, we can exit early.
if (!slow.is_linked()) {
frame_->EmitPush(r0);
return;
}
frame_->SpillAll();
done.Jump();
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
frame_->SpillAll();
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
// Only generate the fast case for locals that rewrite to slots.
// This rules out argument loads.
@ -2992,6 +2996,7 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
slow.Bind();
VirtualFrame::SpilledScope spilled_scope(frame_);
frame_->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name()));
frame_->EmitPush(r0);
@ -3143,16 +3148,17 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
Register tmp2,
JumpTarget* slow) {
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
Register tmp = frame_->scratch0();
Register tmp2 = frame_->scratch1();
Register context = cp;
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
frame_->SpillAll();
// Check that extension is NULL.
__ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(tmp2, tmp2);
@ -3170,6 +3176,7 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
}
if (s->is_eval_scope()) {
frame_->SpillAll();
Label next, fast;
__ Move(tmp, context);
__ bind(&next);
@ -3192,6 +3199,7 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
// Load the global object.
LoadGlobal();
// Setup the name register and call load IC.
frame_->SpillAllButCopyTOSToR0();
__ mov(r2, Operand(slot->var()->name()));
frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF
? RelocInfo::CODE_TARGET
@ -3524,7 +3532,6 @@ void CodeGenerator::VisitProperty(Property* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
@ -3703,7 +3710,7 @@ void CodeGenerator::VisitCall(Call* node) {
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
EmitKeyedLoad(false);
EmitKeyedLoad();
frame_->Drop(); // key
// Put the function below the receiver.
if (property->is_synthetic()) {
@ -4437,8 +4444,7 @@ class DeferredSearchCache: public DeferredCode {
void DeferredSearchCache::Generate() {
__ push(cache_);
__ push(key_);
__ Push(cache_, key_);
__ CallRuntime(Runtime::kGetFromCache, 2);
if (!dst_.is(r0)) {
__ mov(dst_, r0);
@ -5231,34 +5237,105 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void BeforeGenerate();
virtual void Generate();
virtual void AfterGenerate();
private:
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::BeforeGenerate() {
__ StartBlockConstPool();
void DeferredReferenceGetNamedValue::Generate() {
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
// Setup the registers and call load IC.
// On entry to this deferred code, r0 is assumed to already contain the
// receiver from the top of the stack.
__ mov(r2, Operand(name_));
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop(1) instruction to indicate that the
// in-object load has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
}
void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
// Setup the name register and call load IC.
__ mov(r2, Operand(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop(1) instruction to indicate that the
// in-object load has been inlined.
__ nop(NAMED_PROPERTY_LOAD_INLINED);
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
DeferredReferenceGetKeyedValue() {
set_comment("[ DeferredReferenceGetKeyedValue");
}
virtual void Generate();
};
void DeferredReferenceGetKeyedValue::Generate() {
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed load IC. It has all arguments on the stack.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// keyed load has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
}
void DeferredReferenceGetNamedValue::AfterGenerate() {
__ EndBlockConstPool();
class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
DeferredReferenceSetKeyedValue() {
set_comment("[ DeferredReferenceSetKeyedValue");
}
virtual void Generate();
};
void DeferredReferenceSetKeyedValue::Generate() {
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
__ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
__ IncrementCounter(
&Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
// The rest of the instructions in the deferred code must be together.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Call keyed store IC. It has the receiver and key on the stack and the value to
// store in r0.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop instruction to indicate that the
// keyed store has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
}
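All three deferred classes above follow one pattern around the patched call site; a condensed sketch (simplified from the code above, not a fourth class in this diff):

    void DeferredPropertyAccess::Generate() {  // hypothetical composite
      Assembler::BlockConstPoolScope block_const_pool(masm_);
      Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
      __ Call(ic, RelocInfo::CODE_TARGET);  // IC call; target gets patched
      __ nop(PROPERTY_ACCESS_INLINED);      // marker nop identifies the site
      __ BlockConstPoolFor(1);              // also cover the branch emitted
                                            // right after the deferred body
    }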
@ -5266,63 +5343,231 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
// Setup the name register and call load IC.
frame_->SpillAllButCopyTOSToR0();
__ mov(r2, Operand(name));
frame_->CallLoadIC(is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
} else {
// Inline the inobject property case.
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(name);
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::named_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
// The following instructions are the inlined load of an in-object property.
// Parts of this code are patched, so the exact instructions generated need
// to be fixed. Therefore the constant pool is blocked when generating
// this code.
// Load the receiver from the stack.
frame_->SpillAllButCopyTOSToR0();
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(name);
#ifdef DEBUG
int kInlinedNamedLoadInstructions = 8;
int kInlinedNamedLoadInstructions = 7;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Load the receiver from the stack.
__ ldr(r1, MemOperand(sp, 0));
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Check that the receiver is a heap object.
__ tst(r1, Operand(kSmiTagMask));
__ tst(r0, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
deferred->Branch(ne);
// Use initially use an invalid index. The index will be patched by the
// Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(r1, 0));
__ ldr(r0, MemOperand(r0, 0));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
deferred->BindExit();
}
}
void CodeGenerator::EmitKeyedLoad() {
if (loop_nesting() == 0) {
VirtualFrame::SpilledScope spilled(frame_);
Comment cmnt(masm_, "[ Load from keyed property");
frame_->CallKeyedLoadIC();
} else {
// Inline the keyed load.
Comment cmnt(masm_, "[ Inlined load from keyed property");
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::keyed_load_inline, 1,
frame_->scratch0(), frame_->scratch1());
// Load the receiver and key from the stack.
frame_->SpillAllButCopyTOSToR1R0();
Register receiver = r0;
Register key = r1;
VirtualFrame::SpilledScope spilled(frame_);
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue();
// Check that the receiver is a heap object.
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
// The following instructions are part of the inlined keyed load code that
// can be patched. Therefore the exact number of instructions generated
// needs to be fixed, so the constant pool is blocked while generating
// this code.
#ifdef DEBUG
int kInlinedKeyedLoadInstructions = 19;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ mov(scratch2, Operand(Factory::null_value()));
__ cmp(scratch1, scratch2);
deferred->Branch(ne);
// Check that the key is a smi.
__ tst(key, Operand(kSmiTagMask));
deferred->Branch(ne);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch2, ip);
deferred->Branch(ne);
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
__ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
__ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
deferred->Branch(ls); // Unsigned less equal.
// Load and check that the result is not the hole (key is a smi).
__ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ add(scratch1,
scratch1,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0,
MemOperand(scratch1, key, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
__ cmp(r0, scratch2);
// This is the only branch to deferred where r0 and r1 do not contain the
// receiver and key. We can't just load undefined here because we have to
// check the prototype.
deferred->Branch(eq);
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedKeyedLoadInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
__ IncrementCounter(&Counters::named_load_inline, 1, r1, r2);
deferred->BindExit();
}
}
void CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
RelocInfo::Mode rmode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
frame_->CallCodeObject(ic, rmode, 0);
void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
frame_->AssertIsSpilled();
// Generate inlined version of the keyed store if the code is in a loop
// and the key is likely to be a smi.
if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
// Inline the keyed store.
Comment cmnt(masm_, "[ Inlined store to keyed property");
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue();
// Counter will be decremented in the deferred code. Placed here to avoid
// having it in the instruction stream below where patching will occur.
__ IncrementCounter(&Counters::keyed_store_inline, 1,
frame_->scratch0(), frame_->scratch1());
// Check that the value is a smi. As this inlined code does not set the
// write barrier it is only possible to store smi values.
__ tst(r0, Operand(kSmiTagMask));
deferred->Branch(ne);
// Load the key and receiver from the stack.
__ ldr(r1, MemOperand(sp, 0));
__ ldr(r2, MemOperand(sp, kPointerSize));
// Check that the key is a smi.
__ tst(r1, Operand(kSmiTagMask));
deferred->Branch(ne);
// Check that the receiver is a heap object.
__ tst(r2, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check that the receiver is a JSArray.
__ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
deferred->Branch(ne);
// Check that the key is within bounds. Both the key and the length of
// the JSArray are smis. Use unsigned comparison to handle negative keys.
__ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
__ cmp(r3, r1);
deferred->Branch(ls); // Unsigned less equal.
// The following instructions are part of the inlined keyed store code that
// can be patched. Therefore the exact number of instructions generated
// needs to be fixed, so the constant pool is blocked while generating
// this code.
#ifdef DEBUG
int kInlinedKeyedStoreInstructions = 7;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
__ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
// Read the fixed array map from the constant pool (not from the root
// array) so that the value can be patched. When debugging, we patch this
// comparison to always fail so that we will hit the IC call in the
// deferred code which will allow the debugger to break for fast case
// stores.
__ mov(r5, Operand(Factory::fixed_array_map()));
__ cmp(r4, r5);
deferred->Branch(ne);
// Store the value.
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ str(r0, MemOperand(r3, r1, LSL,
kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedKeyedStoreInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
}
deferred->BindExit();
} else {
frame()->CallKeyedStoreIC();
}
}
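Reading the fixed array map from the constant pool (rather than the root array) is what lets the debugger disable this fast path; the patching side appears in ic-arm.cc below. Usage sketch of that pairing:

    // Debugger attaches: make the map check always fail, forcing every
    // store through the deferred IC call where a breakpoint can be taken.
    KeyedStoreIC::ClearInlinedVersion(address);    // patches in null_value
    // Debugger detaches: restore the fast case.
    KeyedStoreIC::RestoreInlinedVersion(address);  // patches fixed_array_map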
@ -5381,12 +5626,8 @@ void Reference::GetValue() {
}
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
ASSERT(property != NULL);
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
cgen_->EmitKeyedLoad(var != NULL);
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
break;
}
@ -5443,10 +5684,8 @@ void Reference::SetValue(InitState init_state) {
ASSERT(property != NULL);
cgen_->CodeForSourcePosition(property->position());
// Call IC code.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
frame->EmitPop(r0); // value
frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
frame->EmitPop(r0); // Value.
cgen_->EmitKeyedStore(property->key()->type());
frame->EmitPush(r0);
cgen_->UnloadReference(this);
break;
@ -5497,8 +5736,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ push(cp);
__ push(r3);
__ Push(cp, r3);
__ TailCallRuntime(Runtime::kNewClosure, 2, 1);
}
@ -6145,20 +6383,12 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found) {
// Currently only lookup for smis. Check for smi if object is not known to be
// a smi.
if (!object_is_smi) {
ASSERT(kSmiTag == 0);
__ tst(object, Operand(kSmiTagMask));
__ b(ne, not_found);
}
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
Register mask = scratch1;
Register scratch = scratch2;
Register mask = scratch3;
// Load the number string cache.
__ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
@ -6171,9 +6401,55 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ sub(mask, mask, Operand(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value.
__ and_(scratch, mask, Operand(object, ASR, 1));
// number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache.
Label is_smi;
Label load_result_from_cache;
if (!object_is_smi) {
__ BranchOnSmi(object, &is_smi);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ CheckMap(object,
scratch1,
Factory::heap_number_map(),
not_found,
true);
ASSERT_EQ(8, kDoubleSize);
__ add(scratch1,
object,
Operand(HeapNumber::kValueOffset - kHeapObjectTag));
__ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
__ eor(scratch1, scratch1, Operand(scratch2));
__ and_(scratch1, scratch1, Operand(mask));
// Calculate address of entry in string cache: each entry consists
// of two pointer sized fields.
__ add(scratch1,
number_string_cache,
Operand(scratch1, LSL, kPointerSizeLog2 + 1));
Register probe = mask;
__ ldr(probe,
FieldMemOperand(scratch1, FixedArray::kHeaderSize));
__ BranchOnSmi(probe, not_found);
__ sub(scratch2, object, Operand(kHeapObjectTag));
__ vldr(d0, scratch2, HeapNumber::kValueOffset);
__ sub(probe, probe, Operand(kHeapObjectTag));
__ vldr(d1, probe, HeapNumber::kValueOffset);
__ vcmp(d0, d1);
__ vmrs(pc);
__ b(ne, not_found); // The cache did not contain this value.
__ b(&load_result_from_cache);
} else {
__ b(not_found);
}
}
__ bind(&is_smi);
Register scratch = scratch1;
__ and_(scratch, mask, Operand(object, ASR, 1));
// Calculate address of entry in string cache: each entry consists
// of two pointer sized fields.
__ add(scratch,
@ -6181,15 +6457,15 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Operand(scratch, LSL, kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
Register object1 = scratch1;
__ ldr(object1, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(object, object1);
Register probe = mask;
__ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(object, probe);
__ b(ne, not_found);
// Get the result from the cache.
__ bind(&load_result_from_cache);
__ ldr(result,
FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
__ IncrementCounter(&Counters::number_to_string_native,
1,
scratch1,
@ -6203,13 +6479,13 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, false, &runtime);
GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
__ add(sp, sp, Operand(1 * kPointerSize));
__ Ret();
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
__ TailCallRuntime(Runtime::kNumberToString, 1, 1);
__ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}
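The cache comment above refers to Heap::GetNumberStringCache. A standalone sketch of the hashing it describes, reconstructed from the comment rather than copied from the heap code:

    #include <cstdint>
    #include <cstring>

    // Smis hash to their value; doubles hash to the xor of their two 32-bit
    // halves, which is what the ldm/eor sequence above computes in registers.
    static int DoubleHash(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // bit pattern of the IEEE double
      return static_cast<int>(bits) ^ static_cast<int>(bits >> 32);
    }
    // Entry index: (hash & mask) * 2, since each cache entry is a
    // (number, string) pair of pointer-sized fields.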
@ -6328,8 +6604,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
__ push(r1);
__ push(r0);
__ Push(r1, r0);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
if (cc_ == eq) {
@ -6594,8 +6869,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ bind(&slow);
// Push arguments to the stack
__ push(r1);
__ push(r0);
__ Push(r1, r0);
if (Token::ADD == op_) {
// Test for string arguments before calling runtime.
@ -6624,7 +6898,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// First argument is a string, second is a smi. Try to lookup the number
// string for the smi in the number string cache.
NumberToStringStub::GenerateLookupNumberStringCache(
masm, r0, r2, r4, r5, true, &string1);
masm, r0, r2, r4, r5, r6, true, &string1);
// Replace second argument on stack and tailcall string add stub to make
// the result.
@ -6849,8 +7123,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
// If all else failed then we go to the runtime system.
__ bind(&slow);
__ push(lhs); // restore stack
__ push(rhs);
__ Push(lhs, rhs); // Restore stack.
switch (op_) {
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@ -7248,8 +7521,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ push(r1);
__ push(r0);
__ Push(r1, r0);
// Internal frame is necessary to handle exceptions properly.
__ EnterInternalFrame();
@ -7723,7 +7995,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
__ ldr(r5, MemOperand(r5));
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
__ Push(r8, r7, r6, r5);
// Setup frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

deps/v8/src/arm/codegen-arm.h (11)

@ -157,7 +157,7 @@ enum ArgumentsAllocationMode {
// states of the generated code.
enum NopMarkerTypes {
NON_MARKING_NOP = 0,
NAMED_PROPERTY_LOAD_INLINED
PROPERTY_ACCESS_INLINED
};
@ -318,12 +318,14 @@ class CodeGenerator: public AstVisitor {
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad(bool is_global);
void EmitKeyedLoad();
// Store a keyed property. Key and receiver are on the stack and the value is
// in r0. Result is returned in r0.
void EmitKeyedStore(StaticType* key_type);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
Register tmp2,
JumpTarget* slow);
// Special code for typeof expressions: Unfortunately, we must
@ -839,6 +841,7 @@ class NumberToStringStub: public CodeStub {
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found);

deps/v8/src/arm/debug-arm.cc (2)

@ -133,9 +133,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc).
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- [sp] : receiver
// -----------------------------------
// Registers r0 and r2 contain objects that need to be pushed on the

deps/v8/src/arm/full-codegen-arm.cc (15)

@ -125,7 +125,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
__ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
@ -696,8 +696,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object on the stack.
__ ldr(ip, CodeGenerator::GlobalObject());
__ push(ip);
__ ldr(r0, CodeGenerator::GlobalObject());
__ push(r0);
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
@ -739,7 +739,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
__ mov(r1, Operand(key_literal->handle()));
// Push both as arguments to ic.
__ stm(db_w, sp, r2.bit() | r1.bit());
__ Push(r2, r1);
// Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@ -771,7 +771,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r2, Operand(expr->pattern()));
__ mov(r1, Operand(expr->flags()));
__ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
__ Push(r4, r3, r2, r1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
Apply(context_, r0);
@ -785,7 +785,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_properties()));
__ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit() | r0.bit());
__ Push(r3, r2, r1, r0);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
@ -860,7 +860,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(expr->constant_elements()));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
__ Push(r3, r2, r1);
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
@ -997,6 +997,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
__ ldr(r0, MemOperand(sp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
}

deps/v8/src/arm/ic-arm.cc (171)

@ -61,6 +61,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// dictionary.
//
// r2 - holds the name of the property and is unchanged.
// r4 - used as temporary.
Label done;
@ -108,25 +109,25 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
__ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i), left-shifted to avoid right-shifting
// the hash in a separate instruction. The value hash + i + i * i is
// right-shifted in the following 'and' instruction.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
__ add(t1, t1, Operand(
__ add(r4, r4, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
__ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
__ and_(r4, r3, Operand(r4, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
__ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3
__ add(r4, r4, Operand(r4, LSL, 1)); // r4 = r4 * 3
// Check if the key is identical to the name.
__ add(t1, t0, Operand(t1, LSL, 2));
__ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
__ add(r4, t0, Operand(r4, LSL, 2));
__ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
__ cmp(r2, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
@ -136,13 +137,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Check that the value is a normal property.
__ bind(&done); // t1 == t0 + 4*index
__ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
__ bind(&done); // r4 == t0 + 4*index
__ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
__ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
__ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
__ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
}
@ -239,12 +240,11 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
__ ldr(r0, MemOperand(sp, 0));
StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@ -255,12 +255,11 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
__ ldr(r0, MemOperand(sp, 0));
StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
// Cache miss: Jump to runtime.
__ bind(&miss);
@ -272,13 +271,11 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
// Load receiver.
__ ldr(r0, MemOperand(sp, 0));
StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@ -351,7 +348,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
static void GenerateNormalHelper(MacroAssembler* masm,
int argc,
bool is_global_object,
Label* miss) {
Label* miss,
Register scratch) {
// Search dictionary - put result in register r1.
GenerateDictionaryLoad(masm, miss, r0, r1);
@ -360,7 +358,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ b(eq, miss);
// Check that the value is a JSFunction.
__ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
__ b(ne, miss);
// Patch the receiver with the global proxy if necessary.
@ -409,7 +407,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
GenerateNormalHelper(masm, argc, true, &miss);
GenerateNormalHelper(masm, argc, true, &miss, r4);
// Accessing non-global object: Check for access to global proxy.
Label global_proxy, invoke;
@ -422,7 +420,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
__ b(ne, &miss);
__ bind(&invoke);
GenerateNormalHelper(masm, argc, false, &miss);
GenerateNormalHelper(masm, argc, false, &miss, r4);
// Global object access: Check access rights.
__ bind(&global_proxy);
@ -447,7 +445,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
__ EnterInternalFrame();
// Push the receiver and the name of the function.
__ stm(db_w, sp, r2.bit() | r3.bit());
__ Push(r3, r2);
// Call the entry.
__ mov(r0, Operand(2));
@ -489,10 +487,10 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
__ ldr(r0, MemOperand(sp, 0));
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
@ -508,11 +506,11 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss, probe, global;
__ ldr(r0, MemOperand(sp, 0));
// Check that the receiver isn't a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
@ -551,11 +549,12 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
__ ldr(r3, MemOperand(sp, 0));
__ stm(db_w, sp, r2.bit() | r3.bit());
__ mov(r3, r0);
__ Push(r3, r2);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
@ -563,15 +562,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
static inline bool IsInlinedICSite(Address address,
Address* inline_end_address) {
// If the instruction after the call site is not the pseudo instruction nop1
// then this is not related to an inlined in-object property load. The nop1
// instruction is located just after the call to the IC in the deferred code
@ -579,24 +571,42 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// a branch instruction for jumping back from the deferred code.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
if (!Assembler::IsNop(instr_after_call, NAMED_PROPERTY_LOAD_INLINED)) {
if (!Assembler::IsNop(instr_after_call, PROPERTY_ACCESS_INLINED)) {
return false;
}
ASSERT_EQ(0, RegisterAllocator::kNumRegisters);
Address address_after_nop1 = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop1 = Assembler::instr_at(address_after_nop1);
ASSERT(Assembler::IsBranch(instr_after_nop1));
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
ASSERT(Assembler::IsBranch(instr_after_nop));
// Find the end of the inlined code for handling the load.
int b_offset =
Assembler::GetBranchOffset(instr_after_nop1) + Assembler::kPcLoadDelta;
Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
Address inline_end_address = address_after_nop1 + b_offset;
*inline_end_address = address_after_nop + b_offset;
return true;
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined in-object property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// Find the end of the inlined code for handling the load if this is an
// inlined IC call site.
Address inline_end_address;
if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
// The immediate must be represenatble in 12 bits.
// The immediate must be representable in 12 bits.
ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
Address ldr_property_instr_address = inline_end_address - 4;
Address ldr_property_instr_address =
inline_end_address - Assembler::kInstrSize;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
@ -608,29 +618,61 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
// Patch the map check.
Address ldr_map_instr_address = inline_end_address - 16;
Address ldr_map_instr_address =
inline_end_address - 4 * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined keyed load (if present) to
// guarantee failure by holding an invalid map (the null value).
PatchInlinedLoad(address, Heap::null_value());
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
Address inline_end_address;
if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address - 18 * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, Heap::null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, Heap::fixed_array_map());
}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return false;
// Find the end of the inlined code for handling the store if this is an
// inlined IC call site.
Address inline_end_address;
if (!IsInlinedICSite(address, &inline_end_address)) return false;
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address - 5 * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
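The three patch sites above locate the patched instruction at a fixed distance back from the end of the inlined code; those distances must stay in sync with the instruction counts asserted in codegen-arm.cc. A sketch of the shared arithmetic (the helper is illustrative, not part of this diff):

    static Address PatchSite(Address inline_end_address, int instrs_from_end) {
      // The deferred back-branch ends the inlined sequence, so an asserted
      // instruction count pins each patchable instruction at a constant
      // offset: named load map check at 4 instructions from the end, keyed
      // load at 18, keyed store at 5.
      return inline_end_address - instrs_from_end * Assembler::kInstrSize;
    }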
@ -645,7 +687,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
__ Push(r3, r2);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
__ TailCallExternalReference(ref, 2, 1);
@ -660,7 +702,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r2.bit() | r3.bit());
__ Push(r3, r2);
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
@ -778,7 +820,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
__ bind(&index_ok);
// Duplicate receiver and key since they are expected on the stack after
// the KeyedLoadIC call.
__ stm(db_w, sp, r0.bit() | r1.bit());
__ Push(r1, r0);
__ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_JS);
__ bind(&miss);
@ -1094,8 +1136,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ b(ne, &slow);
// Everything is fine, call runtime.
__ push(r1); // receiver
__ push(r0); // key
__ Push(r1, r0); // Receiver, key.
// Perform tail call to the entry.
__ TailCallExternalReference(ExternalReference(
@ -1115,7 +1156,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ ldm(ia, sp, r2.bit() | r3.bit());
__ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
__ Push(r3, r2, r0);
ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
__ TailCallExternalReference(ref, 3, 1);
@ -1130,7 +1171,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// -- sp[1] : receiver
// -----------------------------------
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
__ Push(r3, r1, r0);
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
}
@ -1684,8 +1725,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
__ push(r1);
__ stm(db_w, sp, r2.bit() | r0.bit());
__ Push(r1, r2, r0);
// Perform tail call to the entry.
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
@ -1729,8 +1769,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
__ BranchOnNotSmi(value, &miss);
// Prepare tail call to StoreIC_ArrayLength.
__ push(receiver);
__ push(value);
__ Push(receiver, value);
ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
__ TailCallExternalReference(ref, 2, 1);

deps/v8/src/arm/macro-assembler-arm.cc (4)

@ -1101,11 +1101,11 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
}
void MacroAssembler::CompareObjectType(Register function,
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
InstanceType type) {
ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, type_reg, type);
}
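The rename reflects that the helper is not function-specific. A usage sketch taken from this same diff (EmitKeyedStore above):

    // Compare the object's instance type against JS_ARRAY_TYPE; the map and
    // type registers are clobbered as scratch.
    __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
    deferred->Branch(ne);  // not a JSArray: take the deferred slow path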

deps/v8/src/arm/macro-assembler-arm.h (59)

@ -93,6 +93,65 @@ class MacroAssembler: public Assembler {
// well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
if (src1.code() > src2.code()) {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
} else {
str(src1, MemOperand(sp, 4, NegPreIndex), cond);
str(src2, MemOperand(sp, 4, NegPreIndex), cond);
}
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
} else {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
str(src3, MemOperand(sp, 4, NegPreIndex), cond);
}
} else {
str(src1, MemOperand(sp, 4, NegPreIndex), cond);
Push(src2, src3, cond);
}
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2,
Register src3, Register src4, Condition cond = al) {
ASSERT(!src1.is(src2));
ASSERT(!src2.is(src3));
ASSERT(!src1.is(src3));
ASSERT(!src1.is(src4));
ASSERT(!src2.is(src4));
ASSERT(!src3.is(src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
stm(db_w,
sp,
src1.bit() | src2.bit() | src3.bit() | src4.bit(),
cond);
} else {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
str(src4, MemOperand(sp, 4, NegPreIndex), cond);
}
} else {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
Push(src3, src4, cond);
}
} else {
str(src1, MemOperand(sp, 4, NegPreIndex), cond);
Push(src2, src3, src4, cond);
}
}
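All overloads push the leftmost register to the highest address and collapse to a single stm when the register codes happen to be in descending order. Usage sketch; the equivalence is taken from the call sites converted in this diff:

    __ Push(r3, r2, r1);
    // Same stack layout as the replaced sequence:
    //   __ stm(db_w, sp, r1.bit() | r2.bit() | r3.bit());
    // i.e. r3 ends up at the highest address and r1 at [sp].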
// ---------------------------------------------------------------------------
// Stack limit support

deps/v8/src/arm/simulator-arm.cc (6)

@ -541,7 +541,6 @@ void Simulator::FlushOnePage(intptr_t start, int size) {
void Simulator::CheckICache(Instr* instr) {
#ifdef DEBUG
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@ -560,7 +559,6 @@ void Simulator::CheckICache(Instr* instr) {
memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
#endif
}
@ -2441,7 +2439,9 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
CheckICache(instr);
if (v8::internal::FLAG_check_icache) {
CheckICache(instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
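The new guard reads FLAG_check_icache, which corresponds to the one-line change to flag-definitions.h in the file list (that hunk is not shown here). A hypothetical reconstruction of the entry; the flag name comes from the use above, while the default and help text are guesses:

    // Assumption: default and description not shown in this diff.
    DEFINE_bool(check_icache, false,
                "check icache flushes in ARM simulator")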

deps/v8/src/arm/stub-cache-arm.cc (45)

@ -296,7 +296,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
__ mov(r2, Operand(Handle<Map>(transition)));
__ stm(db_w, sp, r2.bit() | r0.bit());
__ Push(r2, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
3, 1);
@ -464,8 +464,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ EnterInternalFrame();
__ push(receiver);
__ push(holder);
__ push(name_);
__ Push(holder, name_);
CompileCallLoadPropertyWithInterceptor(masm,
receiver,
@ -510,8 +509,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
Label cleanup;
__ pop(scratch2);
__ push(receiver);
__ push(scratch2);
__ Push(receiver, scratch2);
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
lookup->holder(), scratch1,
@ -523,8 +521,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
__ Move(holder, Handle<AccessorInfo>(callback));
__ push(holder);
__ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
__ push(scratch1);
__ push(name_);
__ Push(scratch1, name_);
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
@ -725,13 +722,11 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
__ push(reg); // holder
__ push(receiver); // Receiver.
__ push(reg); // Holder.
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data
__ push(ip);
__ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
__ push(reg);
__ push(name_reg); // name
__ Push(ip, reg, name_reg);
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
@ -1105,8 +1100,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
// Call the interceptor.
__ EnterInternalFrame();
__ push(holder_reg);
__ push(name_reg);
__ Push(holder_reg, name_reg);
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
@ -1233,7 +1227,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
__ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::call_global_inline, 1, r3, r4);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@ -1309,7 +1303,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
__ push(r1); // receiver
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
__ stm(db_w, sp, ip.bit() | r2.bit() | r0.bit());
__ Push(ip, r2, r0);
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
@ -1354,9 +1348,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
// checks.
ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
__ push(r1); // receiver.
__ push(r2); // name.
__ push(r0); // value.
__ Push(r1, r2, r0); // Receiver, name, value.
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
@ -1559,35 +1551,34 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ tst(r1, Operand(kSmiTagMask));
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
}
// Check that the map of the global has not changed.
CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
CheckPrototypes(object, r0, holder, r3, r4, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
__ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
__ cmp(r4, ip);
__ b(eq, &miss);
}
__ mov(r0, r4);
__ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
__ Ret();

76
deps/v8/src/arm/virtual-frame-arm.cc

@ -88,7 +88,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
__ pop(r1);
__ pop(r1);
__ pop(r0);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
__ push(r0);
@ -121,8 +121,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
__ pop(r0);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
__ push(r1);
__ push(r0);
__ Push(r1, r0);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
__ push(r1);
@ -137,8 +136,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
__ Swap(r0, r1, ip);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
__ push(r0);
__ push(r1);
__ Push(r0, r1);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
__ push(r0);
@ -270,6 +268,7 @@ void VirtualFrame::CallJSFunction(int arg_count) {
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
ASSERT(SpilledScope::is_spilled());
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
@ -305,6 +304,18 @@ void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
}
void VirtualFrame::CallKeyedLoadIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallKeyedStoreIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
@ -398,6 +409,61 @@ void VirtualFrame::EmitPop(Register reg) {
}
void VirtualFrame::SpillAllButCopyTOSToR0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
break;
case R0_TOS:
__ push(r0);
break;
case R1_TOS:
__ push(r1);
__ mov(r0, r1);
break;
case R0_R1_TOS:
__ Push(r1, r0);
break;
case R1_R0_TOS:
__ Push(r0, r1);
__ mov(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SpillAllButCopyTOSToR1R0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r1, MemOperand(sp, 0));
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R0_TOS:
__ push(r0);
__ mov(r1, r0);
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R1_TOS:
__ push(r1);
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R0_R1_TOS:
__ Push(r1, r0);
__ Swap(r0, r1, ip);
break;
case R1_R0_TOS:
__ Push(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
Register VirtualFrame::Peek() {
AssertIsNotSpilled();
if (top_of_stack_state_ == NO_TOS_REGISTERS) {

19
deps/v8/src/arm/virtual-frame-arm.h

@ -308,10 +308,18 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver on stack and property name in r2. Result returned in
// r0.
// Call load IC. Receiver is on the stack and the property name is in r2.
// Result is returned in r0.
void CallLoadIC(RelocInfo::Mode mode);
// Call keyed load IC. Key and receiver are on the stack. Result is returned
// in r0.
void CallKeyedLoadIC();
// Call keyed store IC. Key and receiver are on the stack and the value is in
// r0. Result is returned in r0.
void CallKeyedStoreIC();
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
// and depend on the type of IC stub.
@ -340,6 +348,13 @@ class VirtualFrame : public ZoneObject {
// must be copied to a scratch register before modification.
Register Peek();
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();
// Flushes all registers, but it puts a copy of the top-of-stack in r1
// and the next value on the stack in r0.
void SpillAllButCopyTOSToR1R0();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
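The CASE_NUMBER switches in virtual-frame-arm.cc above encode the TOS register states only implicitly. Here is a toy model (plain C++, not V8's types) of what SpillAllButCopyTOSToR0 does in each state; the state semantics — which register holds the top — are read off the spill code above and are an interpretation, not quoted from V8:

#include <cassert>
#include <cstdio>
#include <vector>

// Toy model of the ARM virtual frame's lazy TOS caching: up to two of the
// topmost stack slots live in r0/r1 instead of memory. From the spill code
// above: R0_TOS means the top is in r0; R0_R1_TOS means the top is in r0
// and the next slot in r1; R1_R0_TOS the reverse. Spilling writes the
// cached slots back to memory and leaves a copy of the top in r0.
enum TosState { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS };

struct Frame {
  std::vector<int> mem;  // memory stack; back() plays the role of sp[0]
  int r0 = 0, r1 = 0;
  TosState state = NO_TOS_REGISTERS;

  void SpillAllButCopyTOSToR0() {
    switch (state) {
      case NO_TOS_REGISTERS: r0 = mem.back(); break;
      case R0_TOS:    mem.push_back(r0); break;
      case R1_TOS:    mem.push_back(r1); r0 = r1; break;
      case R0_R1_TOS: mem.push_back(r1); mem.push_back(r0); break;
      case R1_R0_TOS: mem.push_back(r0); mem.push_back(r1); r0 = r1; break;
    }
    state = NO_TOS_REGISTERS;
  }
};

int main() {
  Frame f;
  f.mem = {7};
  f.r0 = 42; f.r1 = 9;  // logical stack, top first: 42, 9, 7
  f.state = R0_R1_TOS;
  f.SpillAllButCopyTOSToR0();
  assert(f.mem.back() == 42 && f.r0 == 42);
  std::printf("TOS copy in r0: %d\n", f.r0);
}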

84
deps/v8/src/array.js

@ -644,77 +644,26 @@ function ArraySort(comparefn) {
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
var global_receiver;
function InsertionSortWithFunc(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = %_CallFunction(global_receiver, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
break;
}
if (!IS_FUNCTION(comparefn)) {
comparefn = function (x, y) {
if (x === y) return 0;
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
a[j + 1] = element;
}
x = ToString(x);
y = ToString(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
}
function QuickSortWithFunc(a, from, to) {
// Insertion sort is faster for short arrays.
if (to - from <= 22) {
InsertionSortWithFunc(a, from, to);
return;
}
var pivot_index = $floor($random() * (to - from)) + from;
var pivot = a[pivot_index];
// Issue 95: Keep the pivot element out of the comparisons to avoid
// infinite recursion if comparefn(pivot, pivot) != 0.
a[pivot_index] = a[from];
a[from] = pivot;
var low_end = from; // Upper bound of the elements lower than pivot.
var high_start = to; // Lower bound of the elements greater than pivot.
// From low_end to i are elements equal to pivot.
// From i to high_start are elements that haven't been compared yet.
for (var i = from + 1; i < high_start; ) {
var element = a[i];
var order = %_CallFunction(global_receiver, element, pivot, comparefn);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;
i++;
low_end++;
} else if (order > 0) {
high_start--;
a[i] = a[high_start];
a[high_start] = element;
} else { // order == 0
i++;
}
}
QuickSortWithFunc(a, from, low_end);
QuickSortWithFunc(a, high_start, to);
}
function Compare(x,y) {
if (x === y) return 0;
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
x = ToString(x);
y = ToString(y);
if (x == y) return 0;
else return x < y ? -1 : 1;
};
var global_receiver = %GetGlobalReceiver();
function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = Compare(tmp, element);
var order = %_CallFunction(global_receiver, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
@ -743,7 +692,7 @@ function ArraySort(comparefn) {
// From i to high_start are elements that haven't been compared yet.
for (var i = from + 1; i < high_start; ) {
var element = a[i];
var order = Compare(element, pivot);
var order = %_CallFunction(global_receiver, element, pivot, comparefn);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;
@ -903,12 +852,7 @@ function ArraySort(comparefn) {
num_non_undefined = SafeRemoveArrayHoles(this);
}
if(IS_FUNCTION(comparefn)) {
global_receiver = %GetGlobalReceiver();
QuickSortWithFunc(this, 0, num_non_undefined);
} else {
QuickSort(this, 0, num_non_undefined);
}
QuickSort(this, 0, num_non_undefined);
if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
// For compatibility with JSC, we shadow any elements in the prototype
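The partition loop kept by this change deliberately moves the pivot to a[from] and never compares it against itself, so a comparator with comparefn(pivot, pivot) != 0 cannot cause infinite recursion (issue 95). A standalone C++ sketch of the same three-way partition, leaving out the insertion-sort cutoff for short ranges and the random pivot that ArraySort uses:

#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

// Three-way QuickSort partition in the style of ArraySort above: the pivot
// is swapped to a[from] and kept out of the comparisons, so an inconsistent
// comparator cannot recurse forever on equal elements.
void QuickSort(std::vector<int>& a, int from, int to,
               const std::function<int(int, int)>& cmp) {
  while (to - from > 1) {
    int pivot_index = from + (to - from) / 2;  // ArraySort picks randomly.
    int pivot = a[pivot_index];
    a[pivot_index] = a[from];
    a[from] = pivot;
    int low_end = from;    // Upper bound of elements lower than pivot.
    int high_start = to;   // Lower bound of elements greater than pivot.
    // [low_end, i) holds elements equal to pivot; [i, high_start) is
    // still unclassified.
    for (int i = from + 1; i < high_start;) {
      int order = cmp(a[i], pivot);
      if (order < 0) {
        std::swap(a[i], a[low_end]);
        i++;
        low_end++;
      } else if (order > 0) {
        high_start--;
        std::swap(a[i], a[high_start]);
      } else {
        i++;  // Equal to pivot; leave it in the middle band.
      }
    }
    QuickSort(a, from, low_end, cmp);
    from = high_start;  // Iterate on the upper band instead of recursing.
  }
}

int main() {
  std::vector<int> v = {3, 1, 2, 3, 0};
  QuickSort(v, 0, static_cast<int>(v.size()),
            [](int x, int y) { return x - y; });
  for (int x : v) std::printf("%d ", x);  // 0 1 2 3 3
}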

31
deps/v8/src/builtins.cc

@ -300,35 +300,6 @@ static void FillWithHoles(FixedArray* dst, int from, int to) {
}
static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
// For now this trick is only applied to fixed arrays in new space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// remembered sets.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
former_start[1] = Heap::fixed_array_map();
former_start[2] = reinterpret_cast<Object*>(len - 1);
ASSERT_EQ(elms->address() + kPointerSize, (elms + kPointerSize)->address());
return elms + kPointerSize;
}
static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// For now this trick is only applied to fixed arrays in new space.
// In large object space the object's start must coincide with chunk
@ -527,7 +498,7 @@ BUILTIN(ArrayShift) {
if (Heap::new_space()->Contains(elms)) {
// As elms is still in the same space it used to be in (new space),
// there is no need to update the remembered set.
array->set_elements(LeftTrimFixedArray(elms), SKIP_WRITE_BARRIER);
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
AssertNoAllocation no_gc;
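The left-trim trick that ArrayShift now reaches through LeftTrimFixedArray(elms, 1) can be illustrated standalone: instead of copying length - 1 elements down, stamp a filler "object" over the vacated word and re-create the array header one word in. The word layout and tag values below are illustrative, not V8's:

#include <cstdio>

// "Heap" words: an array object is [map, length, e0, e1, ...]. Trimming one
// element overwrites the old map slot with a one-word filler and writes a
// new map and length one word in, so a linear heap scan still parses.
enum Tag { kFillerMap = 1, kArrayMap = 2 };

long* LeftTrimByOne(long* obj) {
  long len = obj[1];
  obj[0] = kFillerMap;  // Former map slot becomes a one-word filler object.
  obj[1] = kArrayMap;   // New map, one word in.
  obj[2] = len - 1;     // New length, in the slot of the discarded e0.
  return obj + 1;       // The array "moved" without touching its elements.
}

int main() {
  long heap[] = {kArrayMap, 3, 10, 20, 30};
  long* arr = LeftTrimByOne(heap);
  std::printf("len=%ld first=%ld\n", arr[1], arr[2]);  // len=2 first=20
}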

2
deps/v8/src/codegen.cc

@ -77,13 +77,11 @@ void CodeGenerator::ProcessDeferred() {
}
// Generate the code.
Comment cmnt(masm_, code->comment());
code->BeforeGenerate();
masm_->bind(code->entry_label());
code->SaveRegisters();
code->Generate();
code->RestoreRegisters();
masm_->jmp(code->exit_label());
code->AfterGenerate();
}
}

3
deps/v8/src/codegen.h

@ -212,9 +212,6 @@ class DeferredCode: public ZoneObject {
void SaveRegisters();
void RestoreRegisters();
virtual void BeforeGenerate() { }
virtual void AfterGenerate() { }
protected:
MacroAssembler* masm_;

7
deps/v8/src/debug.cc

@ -2133,6 +2133,13 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
}
Handle<Context> Debugger::GetDebugContext() {
never_unload_debugger_ = true;
EnterDebugger debugger;
return Debug::debug_context();
}
void Debugger::UnloadDebugger() {
// Make sure that there are no breakpoints left.
Debug::ClearAllBreakPoints();

3
deps/v8/src/debug.h

@ -665,9 +665,12 @@ class Debugger {
static void CallMessageDispatchHandler();
static Handle<Context> GetDebugContext();
// Unload the debugger if possible. Only called when no debugger is currently
// active.
static void UnloadDebugger();
friend void ForceUnloadDebugger(); // In test-debug.cc
inline static bool EventActive(v8::DebugEvent event) {
ScopedLock with(debugger_access_);

1
deps/v8/src/flag-definitions.h

@ -232,6 +232,7 @@ DEFINE_bool(optimize_ast, true, "optimize the ast")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")

22
deps/v8/src/ia32/codegen-ia32.cc

@ -8340,7 +8340,7 @@ Result CodeGenerator::EmitKeyedLoad() {
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
// it is within bounds. Use unsigned comparison to handle negative keys.
__ mov(result.reg(), key.reg());
__ SmiUntag(result.reg());
__ cmp(result.reg(),
@ -8413,27 +8413,27 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
deferred->Branch(not_zero);
}
// Check that the key is a non-negative smi.
__ test(key.reg(), Immediate(kSmiTagMask | kSmiSignMask));
deferred->Branch(not_zero);
// Check that the key is a smi.
if (!key.is_smi()) {
__ test(key.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
}
// Check that the receiver is not a smi.
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
// Check that the receiver is a JSArray.
__ mov(tmp.reg(),
FieldOperand(receiver.reg(), HeapObject::kMapOffset));
__ movzx_b(tmp.reg(),
FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
__ cmp(tmp.reg(), JS_ARRAY_TYPE);
__ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
deferred->Branch(not_equal);
// Check that the key is within bounds. Both the key and the length of
// the JSArray are smis.
// the JSArray are smis. Use unsigned comparison to handle negative keys.
__ cmp(key.reg(),
FieldOperand(receiver.reg(), JSArray::kLengthOffset));
deferred->Branch(greater_equal);
deferred->Branch(above_equal);
// Get the elements array from the receiver and check that it is not a
// dictionary.
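The switch from greater_equal to above_equal matters because the key may be negative. Smis are tagged as value << 1, so a negative key has its top bit set, and an unsigned comparison against the (non-negative) length rejects it together with ordinary out-of-range keys in a single branch. A minimal sketch:

#include <cstdint>
#include <cstdio>

// Unsigned bounds check on smi-tagged values (value << 1): any negative
// key becomes a huge unsigned number, so an unsigned key >= length test
// catches both out-of-range and negative indices, mirroring above_equal.
bool InBounds(int32_t key_smi, int32_t length_smi) {
  return static_cast<uint32_t>(key_smi) < static_cast<uint32_t>(length_smi);
}

int main() {
  std::printf("%d %d %d\n",
              InBounds(3 * 2, 10 * 2),    // 1: key 3, length 10
              InBounds(12 * 2, 10 * 2),   // 0: key 12 out of range
              InBounds(-1 * 2, 10 * 2));  // 0: negative key rejected
}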

16
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1241,6 +1241,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// -----------------------------------
ASSERT(check == RECEIVER_MAP_CHECK);
// If object is not an array, bail out to regular call.
if (!object->IsJSArray()) {
return Heap::undefined_value();
}
Label miss;
// Get the receiver from the stack.
@ -1389,6 +1394,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// -----------------------------------
ASSERT(check == RECEIVER_MAP_CHECK);
// If object is not an array, bail out to regular call.
if (!object->IsJSArray()) {
return Heap::undefined_value();
}
Label miss, empty_array, call_builtin;
// Get the receiver from the stack.
@ -1476,7 +1486,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
return generator(this, object, holder, function, name, check);
Object* result = generator(this, object, holder, function, name, check);
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) {
return result;
}
}
Label miss_in_smi_check;

17
deps/v8/src/jsregexp.cc

@ -4872,17 +4872,18 @@ void Analysis::VisitAssertion(AssertionNode* that) {
SetRelation word_relation =
CharacterRange::WordCharacterRelation(following_chars);
if (word_relation.ContainedIn()) {
// Following character is definitely a word character.
if (word_relation.Disjoint()) {
// Includes the case where following_chars is empty (e.g., end-of-input).
// Following character is definitely *not* a word character.
type = (type == AssertionNode::AT_BOUNDARY) ?
AssertionNode::AFTER_NONWORD_CHARACTER :
AssertionNode::AFTER_WORD_CHARACTER;
AssertionNode::AFTER_WORD_CHARACTER :
AssertionNode::AFTER_NONWORD_CHARACTER;
that->set_type(type);
} else if (word_relation.Disjoint()) {
// Following character is definitely *not* a word character.
} else if (word_relation.ContainedIn()) {
// Following character is definitely a word character.
type = (type == AssertionNode::AT_BOUNDARY) ?
AssertionNode::AFTER_WORD_CHARACTER :
AssertionNode::AFTER_NONWORD_CHARACTER;
AssertionNode::AFTER_NONWORD_CHARACTER :
AssertionNode::AFTER_WORD_CHARACTER;
that->set_type(type);
}
}
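This swap is the fix for regexps like /\b^/: an empty set of following characters is Disjoint from the word characters, so \b at such a position can only match after a word character — the old code specialized it the wrong way around. A small model of the corrected logic (enum names mirror AssertionNode; the two flags stand in for the SetRelation queries):

#include <cstdio>

enum AssertionType {
  AT_BOUNDARY,              // \b
  AT_NON_BOUNDARY,          // \B
  AFTER_WORD_CHARACTER,
  AFTER_NONWORD_CHARACTER
};

// following_in_word / following_disjoint stand in for
// word_relation.ContainedIn() / word_relation.Disjoint() above.
AssertionType Specialize(AssertionType type,
                         bool following_in_word,
                         bool following_disjoint) {
  if (following_disjoint) {
    // Following character is definitely *not* a word character (this
    // includes an empty following set, e.g. "\b^"), so a boundary can only
    // come from a word character before the assertion.
    return type == AT_BOUNDARY ? AFTER_WORD_CHARACTER
                               : AFTER_NONWORD_CHARACTER;
  }
  if (following_in_word) {
    // Following character is definitely a word character.
    return type == AT_BOUNDARY ? AFTER_NONWORD_CHARACTER
                               : AFTER_WORD_CHARACTER;
  }
  return type;  // Mixed following set: no specialization possible.
}

int main() {
  // Prints 2 (AFTER_WORD_CHARACTER): the fixed branch ordering.
  std::printf("%d\n", Specialize(AT_BOUNDARY, false, true));
}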

20
deps/v8/src/liveedit.cc

@ -49,7 +49,7 @@ namespace internal {
// that helps building the chunk list.
class Differencer {
public:
explicit Differencer(Compare::Input* input)
explicit Differencer(Comparator::Input* input)
: input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
buffer_ = NewArray<int>(len1_ * len2_);
}
@ -70,7 +70,7 @@ class Differencer {
CompareUpToTail(0, 0);
}
void SaveResult(Compare::Output* chunk_writer) {
void SaveResult(Comparator::Output* chunk_writer) {
ResultWriter writer(chunk_writer);
int pos1 = 0;
@ -112,7 +112,7 @@ class Differencer {
}
private:
Compare::Input* input_;
Comparator::Input* input_;
int* buffer_;
int len1_;
int len2_;
@ -195,7 +195,7 @@ class Differencer {
class ResultWriter {
public:
explicit ResultWriter(Compare::Output* chunk_writer)
explicit ResultWriter(Comparator::Output* chunk_writer)
: chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
}
@ -217,7 +217,7 @@ class Differencer {
}
private:
Compare::Output* chunk_writer_;
Comparator::Output* chunk_writer_;
int pos1_;
int pos2_;
int pos1_begin_;
@ -243,8 +243,8 @@ class Differencer {
};
void Compare::CalculateDifference(Compare::Input* input,
Compare::Output* result_writer) {
void Comparator::CalculateDifference(Comparator::Input* input,
Comparator::Output* result_writer) {
Differencer differencer(input);
differencer.Initialize();
differencer.FillTable();
@ -312,7 +312,7 @@ class LineEndsWrapper {
// Represents 2 strings as 2 arrays of lines.
class LineArrayCompareInput : public Compare::Input {
class LineArrayCompareInput : public Comparator::Input {
public:
LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
@ -347,7 +347,7 @@ class LineArrayCompareInput : public Compare::Input {
// Stores compare result in JSArray. Each chunk is stored as 3 array elements:
// (pos1_begin, pos1_end, pos2_end).
class LineArrayCompareOutput : public Compare::Output {
class LineArrayCompareOutput : public Comparator::Output {
public:
LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
: array_(Factory::NewJSArray(10)), current_size_(0),
@ -388,7 +388,7 @@ Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
LineArrayCompareOutput output(line_ends1, line_ends2);
Compare::CalculateDifference(&input, &output);
Comparator::CalculateDifference(&input, &output);
return output.GetResult();
}

2
deps/v8/src/liveedit.h

@ -118,7 +118,7 @@ class LiveEdit : AllStatic {
// A general-purpose comparator between 2 arrays.
class Compare {
class Comparator {
public:
// Holds 2 arrays of some elements allowing to compare any pair of

4
deps/v8/src/mark-compact.cc

@ -1211,8 +1211,8 @@ static void SweepNewSpace(NewSpace* space) {
size = object->Size();
survivors_size += size;
if (Heap::ShouldBePromoted(current, size) &&
TryPromoteObject(object, size)) {
// Aggressively promote young survivors to the old space.
if (TryPromoteObject(object, size)) {
continue;
}

4
deps/v8/src/mips/assembler-mips.h

@ -522,7 +522,9 @@ class Assembler : public Malloced {
int32_t pc_offset() const { return pc_ - buffer_; }
int32_t current_position() const { return current_position_; }
int32_t current_statement_position() const { return current_position_; }
int32_t current_statement_position() const {
return current_statement_position_;
}
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting

4
deps/v8/src/regexp-macro-assembler-tracer.cc

@ -37,8 +37,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
RegExpMacroAssembler* assembler) :
assembler_(assembler) {
unsigned int type = assembler->Implementation();
ASSERT(type < 3);
const char* impl_names[3] = {"IA32", "ARM", "Bytecode"};
ASSERT(type < 4);
const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}

19
deps/v8/src/runtime.cc

@ -3162,7 +3162,7 @@ static bool SearchStringMultiple(Vector<schar> subject,
StringSearchStrategy strategy =
InitializeStringSearch(pattern_string, is_ascii);
switch (strategy) {
case SEARCH_FAIL: return false;
case SEARCH_FAIL: break;
case SEARCH_SHORT:
while (pos <= max_search_start) {
if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
@ -3189,16 +3189,17 @@ static bool SearchStringMultiple(Vector<schar> subject,
case SEARCH_LONG:
while (pos <= max_search_start) {
if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
*match_pos = pos;
return false;
*match_pos = pos;
return false;
}
int new_pos = ComplexIndexOf(subject,
pattern_string,
pos + pattern_length);
int match_end = pos + pattern_length;
int new_pos = ComplexIndexOf(subject, pattern_string, match_end);
if (new_pos >= 0) {
// A match has been found.
if (new_pos > pos) {
ReplacementStringBuilder::AddSubjectSlice(builder, pos, new_pos);
// A match has been found.
if (new_pos > match_end) {
ReplacementStringBuilder::AddSubjectSlice(builder,
match_end,
new_pos);
}
pos = new_pos;
builder->Add(pattern);
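The fix here is in the slice bookkeeping: the plain text between two matches starts at the *end* of the previous match (match_end), not at its start, so the old code re-emitted the previous match as part of the slice. A standalone sketch of the corrected loop, using std::string in place of V8's builder machinery:

#include <cstdio>
#include <string>
#include <vector>

// Collect alternating plain-text slices and pattern matches, measuring each
// slice from the end of the previous match, as in the fixed code above.
std::vector<std::string> SplitKeepingMatches(const std::string& subject,
                                             const std::string& pattern) {
  std::vector<std::string> out;
  size_t pos = subject.find(pattern);
  if (pos == std::string::npos) return out;
  if (pos > 0) out.push_back(subject.substr(0, pos));
  out.push_back(pattern);
  for (;;) {
    size_t match_end = pos + pattern.size();
    size_t new_pos = subject.find(pattern, match_end);
    if (new_pos == std::string::npos) {
      if (match_end < subject.size()) out.push_back(subject.substr(match_end));
      return out;
    }
    if (new_pos > match_end)  // Slice starts at match_end, not at pos.
      out.push_back(subject.substr(match_end, new_pos - match_end));
    out.push_back(pattern);
    pos = new_pos;
  }
}

int main() {
  for (const std::string& s : SplitKeepingMatches("a,b,,c", ","))
    std::printf("[%s]", s.c_str());  // [a][,][b][,][,][c]
}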

4
deps/v8/src/version.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 5
#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

370
deps/v8/src/x64/codegen-x64.cc

@ -202,11 +202,21 @@ class FloatingPointHelper : public AllStatic {
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
// operand in src register. Returns operand as floating point number
// in XMM register
// in XMM register. May destroy src register.
static void LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst);
// Code pattern for loading a possible number into a XMM register.
// If the contents of src is not a number, control branches to
// the Label not_number. If contents of src is a smi or a heap number
// object (fp value), it is loaded into the XMM register as a double.
// The register src is not changed, and src may not be kScratchRegister.
static void LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst,
Label *not_number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
// operand_1 in rdx, operand_2 in rax; Returns operands as
@ -5320,6 +5330,22 @@ static bool CouldBeNaN(const Result& result) {
}
// Convert from signed to unsigned comparison to match the way EFLAGS are set
// by FPU and XMM compare instructions.
static Condition DoubleCondition(Condition cc) {
switch (cc) {
case less: return below;
case equal: return equal;
case less_equal: return below_equal;
case greater: return above;
case greater_equal: return above_equal;
default: UNREACHABLE();
}
UNREACHABLE();
return equal;
}
void CodeGenerator::Comparison(AstNode* node,
Condition cc,
bool strict,
@ -5391,7 +5417,7 @@ void CodeGenerator::Comparison(AstNode* node,
left_side = right_side;
right_side = temp;
cc = ReverseCondition(cc);
// This may reintroduce greater or less_equal as the value of cc.
// This may re-introduce greater or less_equal as the value of cc.
// CompareStub and the inline code both support all values of cc.
}
// Implement comparison against a constant Smi, inlining the case
@ -5434,22 +5460,13 @@ void CodeGenerator::Comparison(AstNode* node,
// Jump to builtin for NaN.
not_number.Branch(parity_even, &left_side);
left_side.Unuse();
Condition double_cc = cc;
switch (cc) {
case less: double_cc = below; break;
case equal: double_cc = equal; break;
case less_equal: double_cc = below_equal; break;
case greater: double_cc = above; break;
case greater_equal: double_cc = above_equal; break;
default: UNREACHABLE();
}
dest->true_target()->Branch(double_cc);
dest->true_target()->Branch(DoubleCondition(cc));
dest->false_target()->Jump();
not_number.Bind(&left_side);
}
// Setup and call the compare stub.
CompareStub stub(cc, strict);
CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ testq(result.reg(), result.reg());
@ -5642,17 +5659,34 @@ void CodeGenerator::Comparison(AstNode* node,
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi());
(right_side.is_constant() && !right_side.handle()->IsSmi()) ||
left_side.type_info().IsDouble() ||
right_side.type_info().IsDouble();
NaNInformation nan_info =
(CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
kBothCouldBeNaN :
kCantBothBeNaN;
// Inline number comparison handling any combination of smis and heap
// numbers if:
//   the code is in a loop,
//   the compare operation is different from equal, and
//   the compare is not a for-loop condition.
// The reason for excluding equal is that it will most likely be done
// with smis (not heap numbers), and the code for comparing smis is inlined
// separately. The same reasoning applies to for-loop conditions, which
// will also most likely be smi comparisons.
bool is_loop_condition = (node->AsExpression() != NULL)
&& node->AsExpression()->is_loop_condition();
bool inline_number_compare =
loop_nesting() > 0 && cc != equal && !is_loop_condition;
left_side.ToRegister();
right_side.ToRegister();
if (known_non_smi) {
// Inlined equality check:
// If at least one of the objects is not NaN, then if the objects
// are identical, they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
@ -5660,8 +5694,15 @@ void CodeGenerator::Comparison(AstNode* node,
dest->true_target()->Branch(equal);
}
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
// Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
// Call the compare stub.
// TODO(whesse@chromium.org): Enable the inlining flag once
// GenerateInlineNumberComparison is implemented.
CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
// The result is a Smi, which is negative, zero, or positive.
__ SmiTest(answer.reg()); // Sets both zero and sign flag.
@ -5679,15 +5720,23 @@ void CodeGenerator::Comparison(AstNode* node,
Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
is_smi.Branch(both_smi);
// When non-smi, call out to the compare stub, after inlined checks.
// If at least one of the objects is not NaN, then if the objects
// are identical, they are equal.
// Inline the equality check if both operands can't be a NaN. If both
// objects are the same they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
__ cmpq(left_side.reg(), right_side.reg());
dest->true_target()->Branch(equal);
}
CompareStub stub(cc, strict);
// Inlined number comparison:
if (inline_number_compare) {
GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
}
// Call the compare stub.
// TODO(whesse@chromium.org): Enable the inlining flag once
// GenerateInlineNumberComparison is implemented.
CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ SmiTest(answer.reg()); // Sets both zero and sign flags.
answer.Unuse();
@ -5706,6 +5755,17 @@ void CodeGenerator::Comparison(AstNode* node,
}
void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
ControlDestination* dest) {
ASSERT(left_side->is_register());
ASSERT(right_side->is_register());
// TODO(whesse@chromium.org): Implement this function, and enable the
// corresponding flags in the CompareStub.
}
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(Token::Value op,
@ -7710,6 +7770,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
Register hash,
Register mask) {
__ and_(hash, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup.
__ shl(hash, Immediate(kPointerSizeLog2 + 1));
}
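The premultiplication above, together with the double hash described in the comments below (xor of the upper and lower words, per Heap::GetNumberStringCache), can be reproduced standalone. A sketch assuming a 64-bit platform (kPointerSizeLog2 == 3); the function names and the demo mask are illustrative, not V8's:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hash a double as the xor of its upper and lower 32-bit words.
uint32_t HashDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
}

// Convert a hash to a byte offset into the cache. Each entry is two
// pointers (key, value) == 16 bytes, and x64 addressing modes stop at a
// scale factor of 8, so the index is premultiplied by 16 here.
uint32_t CacheByteOffset(uint32_t hash, uint32_t mask) {
  const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64.
  hash &= mask;                    // mask == capacity - 1 (power of two).
  return hash << (kPointerSizeLog2 + 1);
}

int main() {
  std::printf("%u\n", CacheByteOffset(HashDouble(2.5), 63));
}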
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
@ -7717,12 +7789,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register scratch2,
bool object_is_smi,
Label* not_found) {
// Currently only lookup for smis. Check for smi if object is not known to be
// a smi.
if (!object_is_smi) {
__ JumpIfNotSmi(object, not_found);
}
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
Register mask = scratch1;
@ -7738,28 +7804,57 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ subl(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value.
// number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache.
Label is_smi;
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
__ CheckMap(object, Factory::heap_number_map(), not_found, true);
ASSERT_EQ(8, kDoubleSize);
__ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
GenerateConvertHashCodeToIndex(masm, scratch, mask);
Register index = scratch;
Register probe = mask;
__ movq(probe,
FieldOperand(number_string_cache,
index,
times_1,
FixedArray::kHeaderSize));
__ JumpIfSmi(probe, not_found);
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
__ comisd(xmm0, xmm1);
__ j(parity_even, not_found); // Bail out if NaN is involved.
__ j(not_equal, not_found); // The cache did not contain this value.
__ jmp(&load_result_from_cache);
}
__ bind(&is_smi);
__ movq(scratch, object);
__ SmiToInteger32(scratch, scratch);
__ andl(scratch, mask);
GenerateConvertHashCodeToIndex(masm, scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup
__ shl(scratch, Immediate(kPointerSizeLog2 + 1));
Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmpq(object,
FieldOperand(number_string_cache,
scratch,
index,
times_1,
FixedArray::kHeaderSize));
__ j(not_equal, not_found);
// Get the result from the cache.
__ bind(&load_result_from_cache);
__ movq(result,
FieldOperand(number_string_cache,
scratch,
index,
times_1,
FixedArray::kHeaderSize + kPointerSize));
__ IncrementCounter(&Counters::number_to_string_native, 1);
@ -7777,64 +7872,94 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
__ TailCallRuntime(Runtime::kNumberToString, 1, 1);
__ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
|| (cc == greater) || (cc == greater_equal));
return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
if (cc_ == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// Equality is almost reflexive (everything but NaN), so start by testing
// for "identity and not NaN".
{
Label not_identical;
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical);
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
if (never_nan_nan_) {
__ xor_(rax, rax);
// Identical objects can be compared fast, but there are some tricky cases
// for NaN and undefined.
{
Label not_identical;
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical);
if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan);
__ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// Note: if cc_ != equal, never_nan_nan_ is not used.
if (never_nan_nan_ && (cc_ == equal)) {
__ Set(rax, EQUAL);
__ ret(0);
} else {
Label return_equal;
Label heap_number;
// If it's not a heap number, then return equal.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map());
__ j(equal, &heap_number);
__ bind(&return_equal);
__ Set(rax, EQUAL);
__ ret(0);
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if
// it's not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// We only allow QNaNs, which have bit 51 set (which also rules out
// the value being Infinity).
// Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
// all bits in the mask are set. We only need to check the word
// that contains the exponent and high bit of the mantissa.
ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
__ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
__ xorl(rax, rax);
__ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
__ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
if (cc_ == equal) {
__ setcc(above_equal, rax);
__ ret(0);
} else {
Label return_equal;
Label heap_number;
// If it's not a heap number, then return equal.
__ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
Factory::heap_number_map());
__ j(equal, &heap_number);
__ bind(&return_equal);
__ xor_(rax, rax);
Label nan;
__ j(above_equal, &nan);
__ Set(rax, EQUAL);
__ ret(0);
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if
// it's not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// We only allow QNaNs, which have bit 51 set (which also rules out
// the value being Infinity).
// Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
// all bits in the mask are set. We only need to check the word
// that contains the exponent and high bit of the mantissa.
ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
__ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
__ xorl(rax, rax);
__ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
__ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
__ setcc(above_equal, rax);
__ bind(&nan);
__ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
}
__ bind(&not_identical);
}
__ bind(&not_identical);
}
if (cc_ == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
@ -7896,36 +8021,43 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ push(rdx);
__ push(rcx);
// Inlined floating point compare.
// Call builtin if operands are not floating point or smi.
Label check_for_symbols;
// Push arguments on stack, for helper functions.
FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
__ FCmp();
// Jump to builtin for NaN.
__ j(parity_even, &call_builtin);
// TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
Label below_lbl, above_lbl;
// use rdx, rax to convert unsigned to signed comparison
__ j(below, &below_lbl);
__ j(above, &above_lbl);
__ xor_(rax, rax); // equal
__ ret(2 * kPointerSize);
__ bind(&below_lbl);
__ movq(rax, Immediate(-1));
__ ret(2 * kPointerSize);
// Generate the number comparison code.
if (include_number_compare_) {
Label non_number_comparison;
Label unordered;
FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
&non_number_comparison);
FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
&non_number_comparison);
__ comisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered);
// Return a result of -1, 0, or 1, based on EFLAGS.
__ movq(rax, Immediate(0)); // equal
__ movq(rcx, Immediate(1));
__ cmovq(above, rax, rcx);
__ movq(rcx, Immediate(-1));
__ cmovq(below, rax, rcx);
__ ret(2 * kPointerSize); // rax, rdx were pushed
// If one of the numbers was NaN, then the result is always false.
// The cc is never not-equal.
__ bind(&unordered);
ASSERT(cc_ != not_equal);
if (cc_ == less || cc_ == less_equal) {
__ Set(rax, 1);
} else {
__ Set(rax, -1);
}
__ ret(2 * kPointerSize); // rax, rdx were pushed
__ bind(&above_lbl);
__ movq(rax, Immediate(1));
__ ret(2 * kPointerSize); // rax, rdx were pushed
// The number comparison code did not provide a valid result.
__ bind(&non_number_comparison);
}
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
Label check_for_strings;
if (cc_ == equal) {
BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
@ -7968,14 +8100,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
int ncr; // NaN compare result
if (cc_ == less || cc_ == less_equal) {
ncr = GREATER;
} else {
ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
ncr = LESS;
}
__ Push(Smi::FromInt(ncr));
__ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
}
// Restore return address on the stack.
@ -8764,6 +8889,27 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
}
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register src,
XMMRegister dst,
Label* not_number) {
Label load_smi, done;
ASSERT(!src.is(kScratchRegister));
__ JumpIfSmi(src, &load_smi);
__ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
__ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
__ j(not_equal, not_number);
__ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, src);
__ cvtlsi2sd(dst, kScratchRegister);
__ bind(&done);
}
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
XMMRegister dst1,
XMMRegister dst2) {
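The QNaN test in CompareStub::Generate above — all exponent bits set plus the quiet bit, checked on the high word shifted left by one to drop the sign bit — can be reproduced standalone. A sketch, with the mask value assumed to match V8's kQuietNaNHighBitsMask:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// Quiet-NaN test over the high 32 bits of a double: shift out the sign
// bit, then compare unsigned against the exponent-plus-quiet-bit mask
// (bits 51..62). Requiring the quiet bit also rules out Infinity.
bool IsQuietNaN(double value) {
  const uint32_t kQuietNaNHighBitsMask = 0xfffu << (51 - 32);
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  const uint32_t high = static_cast<uint32_t>(bits >> 32);
  // Mirrors the stub's above_equal after addl(rdx, rdx).
  return (high << 1) >= (kQuietNaNHighBitsMask << 1);
}

int main() {
  std::printf("%d %d %d\n",
              IsQuietNaN(std::nan("")),                              // 1
              IsQuietNaN(1.0),                                       // 0
              IsQuietNaN(std::numeric_limits<double>::infinity()));  // 0
}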

8
deps/v8/src/x64/codegen-x64.h

@ -488,6 +488,10 @@ class CodeGenerator: public AstVisitor {
Condition cc,
bool strict,
ControlDestination* destination);
void GenerateInlineNumberComparison(Result* left_side,
Result* right_side,
Condition cc,
ControlDestination* dest);
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
@ -939,6 +943,10 @@ class NumberToStringStub: public CodeStub {
Label* not_found);
private:
static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
Register hash,
Register mask);
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }

12
deps/v8/test/cctest/test-api.cc

@ -569,6 +569,7 @@ THREADED_TEST(UsingExternalAsciiString) {
THREADED_TEST(ScavengeExternalString) {
TestResource::dispose_count = 0;
bool in_new_space = false;
{
v8::HandleScope scope;
uint16_t* two_byte_string = AsciiToTwoByteString("test string");
@ -576,16 +577,18 @@ THREADED_TEST(ScavengeExternalString) {
String::NewExternal(new TestResource(two_byte_string));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK(i::Heap::InNewSpace(*istring));
in_new_space = i::Heap::InNewSpace(*istring);
CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
CHECK_EQ(0, TestResource::dispose_count);
}
i::Heap::CollectGarbage(0, i::NEW_SPACE);
i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, TestResource::dispose_count);
}
THREADED_TEST(ScavengeExternalAsciiString) {
TestAsciiResource::dispose_count = 0;
bool in_new_space = false;
{
v8::HandleScope scope;
const char* one_byte_string = "test string";
@ -593,10 +596,11 @@ THREADED_TEST(ScavengeExternalAsciiString) {
new TestAsciiResource(i::StrDup(one_byte_string)));
i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
i::Heap::CollectGarbage(0, i::NEW_SPACE);
CHECK(i::Heap::InNewSpace(*istring));
in_new_space = i::Heap::InNewSpace(*istring);
CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
i::Heap::CollectGarbage(0, i::NEW_SPACE);
i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
CHECK_EQ(1, TestAsciiResource::dispose_count);
}

8
deps/v8/test/cctest/test-debug.cc

@ -436,6 +436,12 @@ void CheckDebuggerUnloaded(bool check_functions) {
}
void ForceUnloadDebugger() {
Debugger::never_unload_debugger_ = false;
Debugger::UnloadDebugger();
}
} } // namespace v8::internal
@ -6139,3 +6145,5 @@ TEST(CallingContextIsNotDebugContext) {
debugger_context = v8::Handle<v8::Context>();
CheckDebuggerUnloaded();
}

6
deps/v8/test/cctest/test-liveedit.cc

@ -38,7 +38,7 @@ using namespace v8::internal;
// Anonymous namespace.
namespace {
class StringCompareInput : public Compare::Input {
class StringCompareInput : public Comparator::Input {
public:
StringCompareInput(const char* s1, const char* s2) : s1_(s1), s2_(s2) {
}
@ -72,7 +72,7 @@ class DiffChunkStruct : public ZoneObject {
};
class ListDiffOutputWriter : public Compare::Output {
class ListDiffOutputWriter : public Comparator::Output {
public:
explicit ListDiffOutputWriter(DiffChunkStruct** next_chunk_pointer)
: next_chunk_pointer_(next_chunk_pointer) {
@ -98,7 +98,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk);
Compare::CalculateDifference(&input, &writer);
Comparator::CalculateDifference(&input, &writer);
int len1 = StrLength(s1);
int len2 = StrLength(s2);

1
deps/v8/test/es5conform/es5conform.status

@ -38,7 +38,6 @@ chapter13: UNIMPLEMENTED
chapter14: UNIMPLEMENTED
chapter15/15.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED

11
deps/v8/test/mjsunit/array-pop.js

@ -59,3 +59,14 @@
assertEquals(0, a.length, "length 9th pop");
}
})();
// Test the case of a non-JSArray receiver.
// Regression test for custom call generators, see issue 684.
(function() {
var a = [];
for (var i = 0; i < 100; i++) a.push(i);
var x = {__proto__: a};
for (var i = 0; i < 100; i++) {
assertEquals(99 - i, x.pop(), i + 'th iteration');
}
})();

10
deps/v8/test/mjsunit/array-push.js

@ -103,3 +103,13 @@
assertEquals(29, a.push(29));
}
})();
// Test the case of a non-JSArray receiver.
// Regression test for custom call generators, see issue 684.
(function() {
var x = {__proto__: []};
for (var i = 0; i < 100; i++) {
x.push("a");
assertEquals(i + 1, x.length, i + 'th iteration');
}
})();

4
deps/v8/test/mjsunit/binary-op-newspace.js

@ -30,14 +30,14 @@
* in heap number allocation still works.
*/
// Flags: --max-new-space-size=131072
// Flags: --max-new-space-size=262144
function f(x) {
return x % 3;
}
function test() {
for (var i = 0; i < 20000; i++) {
for (var i = 0; i < 40000; i++) {
assertEquals(-1 / 0, 1 / f(-3));
}
}

48
deps/v8/test/mjsunit/regexp.js

@ -436,3 +436,51 @@ assertTrue(re.multiline);
assertEquals(0, re.lastIndex);
assertEquals(37, re.someOtherProperty);
assertEquals(37, re[42]);
// Test boundary-checks.
function assertRegExpTest(re, input, test) {
assertEquals(test, re.test(input), "test:" + re + ":" + input);
}
assertRegExpTest(/b\b/, "b", true);
assertRegExpTest(/b\b$/, "b", true);
assertRegExpTest(/\bb/, "b", true);
assertRegExpTest(/^\bb/, "b", true);
assertRegExpTest(/,\b/, ",", false);
assertRegExpTest(/,\b$/, ",", false);
assertRegExpTest(/\b,/, ",", false);
assertRegExpTest(/^\b,/, ",", false);
assertRegExpTest(/b\B/, "b", false);
assertRegExpTest(/b\B$/, "b", false);
assertRegExpTest(/\Bb/, "b", false);
assertRegExpTest(/^\Bb/, "b", false);
assertRegExpTest(/,\B/, ",", true);
assertRegExpTest(/,\B$/, ",", true);
assertRegExpTest(/\B,/, ",", true);
assertRegExpTest(/^\B,/, ",", true);
assertRegExpTest(/b\b/, "b,", true);
assertRegExpTest(/b\b/, "ba", false);
assertRegExpTest(/b\B/, "b,", false);
assertRegExpTest(/b\B/, "ba", true);
assertRegExpTest(/b\Bb/, "bb", true);
assertRegExpTest(/b\bb/, "bb", false);
assertRegExpTest(/b\b[,b]/, "bb", false);
assertRegExpTest(/b\B[,b]/, "bb", true);
assertRegExpTest(/b\b[,b]/, "b,", true);
assertRegExpTest(/b\B[,b]/, "b,", false);
assertRegExpTest(/[,b]\bb/, "bb", false);
assertRegExpTest(/[,b]\Bb/, "bb", true);
assertRegExpTest(/[,b]\bb/, ",b", true);
assertRegExpTest(/[,b]\Bb/, ",b", false);
assertRegExpTest(/[,b]\b[,b]/, "bb", false);
assertRegExpTest(/[,b]\B[,b]/, "bb", true);
assertRegExpTest(/[,b]\b[,b]/, ",b", true);
assertRegExpTest(/[,b]\B[,b]/, ",b", false);
assertRegExpTest(/[,b]\b[,b]/, "b,", true);
assertRegExpTest(/[,b]\B[,b]/, "b,", false);

45
deps/v8/test/mjsunit/regress/regress-crbug-40931.js

@ -0,0 +1,45 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// See http://crbug.com/40931
// To reproduce this, we split a comma-separated string and check that the
// resulting array's enumerable indices are exactly the numeric indices
// corresponding to the values of the split.
var names = "a,b,c,d";
for(var i = 0; i < 10; i++) {
var splitNames = names.split(/,/);
var forInNames = [];
var count = 0;
for (name in splitNames) {
forInNames[count++] = name;
}
forInNames.sort();
assertEquals("0,1,2,3", forInNames.join());
}

62
deps/v8/test/mjsunit/search-string-multiple.js

@ -0,0 +1,62 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test search and replace where we search for a string, not a regexp.
function TestCase(id, expected_output, regexp_source, flags, input) {
print(id);
var re = new RegExp(regexp_source, flags);
var output = input.replace(re, MakeReplaceString);
assertEquals(expected_output, output, id);
}
function MakeReplaceString() {
// Arg 0 is the match, n captures follow, n + 1 is offset of match, n + 2 is
// the subject.
var l = arguments.length;
var a = new Array(l - 3);
a.push(arguments[0]);
for (var i = 2; i < l - 2; i++) {
a.push(arguments[i]);
}
return "[@" + arguments[l - 2] + ":" + a.join(",") + "]";
}
(function () {
TestCase(1,
"ajaxNiceForm.villesHome([@24:#OBJ#])",
"#OBJ#",
"g",
"ajaxNiceForm.villesHome(#OBJ#)");
TestCase(2,
"A long string with no non-ASCII characters",
"Unicode string \u1234",
"g",
"A long string with no non-ASCII characters");
})();

2
deps/v8/tools/utils.py

@ -71,6 +71,8 @@ def GuessArchitecture():
return 'ia32'
elif id == 'i86pc':
return 'ia32'
elif id == 'amd64':
return 'ia32'
else:
return None
