
Upgrade V8 to 2.2.5

v0.7.4-release
Ryan Dahl 15 years ago
parent commit 24931f8405
  1. 8
      deps/v8/AUTHORS
  2. 16
      deps/v8/ChangeLog
  3. 80
      deps/v8/src/arm/assembler-arm.cc
  4. 48
      deps/v8/src/arm/assembler-arm.h
  5. 8
      deps/v8/src/arm/builtins-arm.cc
  6. 608
      deps/v8/src/arm/codegen-arm.cc
  7. 37
      deps/v8/src/arm/codegen-arm.h
  8. 10
      deps/v8/src/arm/cpu-arm.cc
  9. 5
      deps/v8/src/arm/disasm-arm.cc
  10. 57
      deps/v8/src/arm/full-codegen-arm.cc
  11. 67
      deps/v8/src/arm/ic-arm.cc
  12. 21
      deps/v8/src/arm/macro-assembler-arm.cc
  13. 101
      deps/v8/src/arm/simulator-arm.cc
  14. 43
      deps/v8/src/arm/simulator-arm.h
  15. 2
      deps/v8/src/arm/stub-cache-arm.cc
  16. 6
      deps/v8/src/arm/virtual-frame-arm.cc
  17. 4
      deps/v8/src/arm/virtual-frame-arm.h
  18. 10
      deps/v8/src/builtins.cc
  19. 2
      deps/v8/src/codegen.cc
  20. 3
      deps/v8/src/codegen.h
  21. 21
      deps/v8/src/globals.h
  22. 6
      deps/v8/src/heap.cc
  23. 57
      deps/v8/src/ia32/codegen-ia32.cc
  24. 587
      deps/v8/src/liveedit-debugger.js
  25. 10
      deps/v8/src/liveedit.cc
  26. 2
      deps/v8/src/liveedit.h
  27. 134
      deps/v8/src/mark-compact.cc
  28. 30
      deps/v8/src/mark-compact.h
  29. 14
      deps/v8/src/regexp.js
  30. 2
      deps/v8/src/runtime.cc
  31. 3
      deps/v8/src/serialize.cc
  32. 34
      deps/v8/src/spaces-inl.h
  33. 212
      deps/v8/src/spaces.cc
  34. 121
      deps/v8/src/spaces.h
  35. 107
      deps/v8/src/string.js
  36. 4
      deps/v8/src/version.cc
  37. 163
      deps/v8/src/x64/codegen-x64.cc
  38. 8
      deps/v8/test/cctest/test-heap.cc
  39. 2
      deps/v8/test/cctest/test-spaces.cc
  40. 45
      deps/v8/test/mjsunit/binary-op-newspace.js
  41. 27
      deps/v8/test/mjsunit/debug-liveedit-newsource.js
  42. 2
      deps/v8/test/mjsunit/debug-stepin-accessor.js
  43. 43
      deps/v8/test/mjsunit/regress/regress-685.js
  44. 9
      deps/v8/test/mjsunit/smi-ops.js
  45. 24
      deps/v8/test/mjsunit/string-replace.js
  46. 26
      deps/v8/tools/gyp/v8.gyp
  47. 2
      deps/v8/tools/v8.xcodeproj/project.pbxproj

8
deps/v8/AUTHORS

@@ -5,22 +5,26 @@
Google Inc.
Sigma Designs Inc.
ARM Ltd.
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Subrato K De <subratokde@codeaurora.org>
Dineel D Sule <dsule@codeaurora.org>

16
deps/v8/ChangeLog

@@ -1,4 +1,18 @@
2010-04-26: Version 2.2.5
Various performance improvements (especially for ARM and x64)
Fixed bug in CPU profiling (http://crbug.com/42137)
Fixed a bug with the natives cache.
Fixed two bugs in the ARM code generator that can cause
wrong calculations.
Fixed a bug that may cause a wrong result for shift operations.
2010-04-21: Version 2.2.4
Fixed warnings on arm on newer GCC versions.

80
deps/v8/src/arm/assembler-arm.cc

@@ -306,6 +306,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
num_prinfo_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
last_const_pool_end_ = 0;
last_bound_pos_ = 0;
@@ -317,6 +318,7 @@ Assembler::Assembler(void* buffer, int buffer_size) {
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) {
if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
spare_buffer_ = buffer_;
@@ -348,6 +350,51 @@ void Assembler::Align(int m) {
}
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
int Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign extend it and multiply it
// with 4 to get the offset in bytes.
return ((instr & Imm24Mask) << 8) >> 6;
}
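The shift pair above is a standard trick: shifting the 24-bit immediate left by 8 moves its sign bit into bit 31, and the arithmetic shift right by 6 sign-extends while leaving a factor of 4 (the byte scaling). A minimal standalone check of the arithmetic, with the mask written out rather than taken from the real headers, and assuming the usual two's-complement arithmetic right shift:

```
#include <cassert>
#include <cstdint>

// Stand-in for the assembler's Imm24Mask.
const uint32_t kImm24Mask = (1u << 24) - 1;

// Sign-extend the low 24 bits and multiply by 4 in one shift pair.
int BranchOffsetBytes(uint32_t instr) {
  return static_cast<int32_t>((instr & kImm24Mask) << 8) >> 6;
}

int main() {
  assert(BranchOffsetBytes(0x00000003) == 12);  // imm24 = 3  -> +12 bytes
  assert(BranchOffsetBytes(0x00FFFFFE) == -8);  // imm24 = -2 -> -8 bytes
  return 0;
}
```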
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
int offset = instr & Off12Mask; // Zero extended offset.
return positive ? offset : -offset;
}
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = offset >= 0;
if (!positive) offset = -offset;
ASSERT(is_uint12(offset));
// Set bit indicating whether the offset should be added.
instr = (instr & ~B23) | (positive ? B23 : 0);
// Set the actual offset.
return (instr & ~Off12Mask) | offset;
}
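Together, the three ldr helpers let later code patch a load's 12-bit displacement in place. A rough round-trip of that logic, with the bit constants spelled out from the ARM encoding (assumed values, not the assembler's actual headers):

```
#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

// Assumed bit constants from the ARM instruction encoding.
const Instr B20 = 1u << 20;
const Instr B22 = 1u << 22;
const Instr B23 = 1u << 23;  // U bit: offset is added when set.
const Instr B25 = 1u << 25;
const Instr B26 = 1u << 26;
const Instr B27 = 1u << 27;
const Instr kOff12Mask = (1u << 12) - 1;

bool IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}

int GetLdrRegisterImmediateOffset(Instr instr) {
  bool positive = (instr & B23) == B23;
  int offset = instr & kOff12Mask;
  return positive ? offset : -offset;
}

Instr SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  instr = (instr & ~B23) | (positive ? B23 : 0);
  return (instr & ~kOff12Mask) | offset;
}

int main() {
  Instr ldr = 0xE5910008;  // ldr r0, [r1, #+8]
  assert(IsLdrRegisterImmediate(ldr));
  assert(GetLdrRegisterImmediateOffset(ldr) == 8);
  ldr = SetLdrRegisterImmediateOffset(ldr, -4);  // Patch to [r1, #-4].
  assert(GetLdrRegisterImmediateOffset(ldr) == -4);
  return 0;
}
```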
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -371,10 +418,10 @@ int Assembler::target_at(int pos) {
}
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & Imm24Mask) << 8) >> 6;
if ((instr & CondMask) == nv && (instr & B24) != 0)
if ((instr & CondMask) == nv && (instr & B24) != 0) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
return pos + kPcLoadDelta + imm26;
}
@@ -902,6 +949,10 @@ void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
}
// Don't allow nop instructions in the form mov rn, rn to be generated using
// the mov instruction. They must be generated using nop(int)
// pseudo instructions.
ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
addrmod1(cond | 13*B21 | s, r0, dst, src);
}
@@ -1691,6 +1742,13 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Pseudo instructions.
void Assembler::nop(int type) {
// This is mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
emit(al | 13*B21 | type*B12 | type);
}
void Assembler::lea(Register dst,
const MemOperand& x,
SBit s,
@@ -1726,11 +1784,6 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
void Assembler::BlockConstPoolFor(int instructions) {
BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
// Debugging.
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
@@ -1894,12 +1947,17 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// However, some small sequences of instructions must not be broken up by the
// insertion of a constant pool; such sequences are protected by setting
// no_const_pool_before_, which is checked here. Also, recursive calls to
// CheckConstPool are blocked by no_const_pool_before_.
if (pc_offset() < no_const_pool_before_) {
// either const_pool_blocked_nesting_ or no_const_pool_before_, which are
// both checked here. Also, recursive calls to CheckConstPool are blocked by
// no_const_pool_before_.
if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
// Emission is currently blocked; make sure we try again as soon as
// possible.
next_buffer_check_ = no_const_pool_before_;
if (const_pool_blocked_nesting_ > 0) {
next_buffer_check_ = pc_offset() + kInstrSize;
} else {
next_buffer_check_ = no_const_pool_before_;
}
// Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);

48
deps/v8/src/arm/assembler-arm.h

@@ -896,7 +896,7 @@ class Assembler : public Malloced {
const Condition cond = al);
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
void nop(int type = 0);
void push(Register src, Condition cond = al) {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
@@ -925,9 +925,21 @@
// Check whether an immediate fits an addressing mode 1 instruction.
bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
// Postpone the generation of the constant pool for the specified number of
// instructions.
void BlockConstPoolFor(int instructions);
// Class for scoping postponing the constant pool generation.
class BlockConstPoolScope {
public:
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockConstPool();
}
~BlockConstPoolScope() {
assem_->EndBlockConstPool();
}
private:
Assembler* assem_;
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
// Debugging
@@ -946,14 +958,30 @@
int current_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
void StartBlockConstPool() {
const_pool_blocked_nesting_++;
}
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
}
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
void instr_at_put(byte* pc, Instr instr) {
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
static bool IsNop(Instr instr, int type = 0);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
void instr_at_put(int pos, Instr instr) {
*reinterpret_cast<Instr*>(buffer_ + pos) = instr;
@@ -1022,8 +1050,9 @@ class Assembler : public Malloced {
// distance between pools.
static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
// Emission of the constant pool may be blocked in some code sequences
int no_const_pool_before_; // block emission before this pc offset
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
// Keep track of the last emitted pool to guarantee a maximal distance
int last_const_pool_end_; // pc offset following the last constant pool
@@ -1075,6 +1104,7 @@ class Assembler : public Malloced {
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
};
} } // namespace v8::internal
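The BlockConstPoolScope added above replaces the old instruction-count based BlockConstPoolFor bookkeeping with RAII-style nesting. A hedged sketch of the mechanism, not the real Assembler, just its shape:

```
// The pool is blocked while the counter is non-zero, so scopes nest freely.
class Assembler {
 public:
  Assembler() : const_pool_blocked_nesting_(0) {}
  void StartBlockConstPool() { const_pool_blocked_nesting_++; }
  void EndBlockConstPool() { const_pool_blocked_nesting_--; }
  bool IsConstPoolBlocked() const { return const_pool_blocked_nesting_ > 0; }

 private:
  int const_pool_blocked_nesting_;
};

class BlockConstPoolScope {
 public:
  explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
    assem_->StartBlockConstPool();
  }
  ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }

 private:
  Assembler* assem_;
};

void EmitFixedSequence(Assembler* masm) {
  BlockConstPoolScope block_const_pool(masm);
  // ... emit instructions whose layout must not be broken up by a
  // constant pool; the block ends automatically when the scope exits.
}
```

Unlike BlockConstPoolFor(n), the scope does not require the caller to predict exactly how many instructions the protected sequence will emit, which is what made the variable-length return sequences below fragile.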

8
deps/v8/src/arm/builtins-arm.cc

@@ -593,7 +593,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&loop);
__ str(r7, MemOperand(r5, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r5, Operand(r6));
__ cmp(r5, r6);
__ b(lt, &loop);
}
@@ -666,7 +666,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&loop);
__ str(r7, MemOperand(r2, kPointerSize, PostIndex));
__ bind(&entry);
__ cmp(r2, Operand(r6));
__ cmp(r2, r6);
__ b(lt, &loop);
}
@@ -863,7 +863,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ldr(r0, MemOperand(r0)); // dereference handle
__ push(r0); // push parameter
__ bind(&entry);
__ cmp(r4, Operand(r2));
__ cmp(r4, r2);
__ b(ne, &loop);
// Initialize all JavaScript callee-saved registers, since they will be seen
@@ -1213,7 +1213,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments;
Label enough, too_few;
__ cmp(r0, Operand(r2));
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);

608
deps/v8/src/arm/codegen-arm.cc

@@ -206,7 +206,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
#ifdef DEBUG
JumpTarget verified_true;
__ cmp(r0, Operand(cp));
__ cmp(r0, cp);
verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp");
verified_true.Bind();
@@ -247,29 +247,10 @@ void CodeGenerator::Generate(CompilationInfo* info) {
}
// Store the arguments object. This must happen after context
// initialization because the arguments object may be stored in the
// context.
if (scope()->arguments() != NULL) {
Comment cmnt(masm_, "[ allocate arguments object");
ASSERT(scope()->arguments_shadow() != NULL);
Variable* arguments = scope()->arguments()->var();
Variable* shadow = scope()->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL);
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address, and the
// frame pointer on the stack.
const int kReceiverDisplacement = 2 + scope()->num_parameters();
__ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
frame_->Adjust(3);
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
StoreToSlot(arguments->slot(), NOT_CONST_INIT);
StoreToSlot(shadow->slot(), NOT_CONST_INIT);
frame_->Drop(); // Value is no longer needed.
// initialization because the arguments object may be stored in
// the context.
if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
StoreArgumentsObject(true);
}
// Initialize ThisFunction reference if present.
@@ -353,37 +334,34 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->CallRuntime(Runtime::kTraceExit, 1);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Calculate the exact length of the return sequence and make sure that
// the constant pool is not emitted inside of the return sequence.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
int return_sequence_length = Assembler::kJSReturnSequenceLength;
if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
// Additional mov instruction generated.
return_sequence_length++;
// Here we use masm_-> instead of the __ macro to keep the code coverage
// tool from instrumenting, as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
}
masm_->BlockConstPoolFor(return_sequence_length);
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. The add instruction above is an addressing
// mode 1 instruction where there are restrictions on which immediate values
// can be encoded in the instruction and which immediate values requires
// use of an additional instruction for moving the immediate to a temporary
// register.
ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
// expected by the debugger. If the sp_delta above cannot be encoded in the
// add instruction, the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
#endif
}
// Adjust for function-level loop nesting.
@@ -393,6 +371,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Code generation state must be reset.
ASSERT(!has_cc());
ASSERT(state_ == NULL);
ASSERT(loop_nesting() == 0);
ASSERT(!function_return_is_shadowed_);
function_return_.Unuse();
DeleteFrame();
@@ -606,6 +585,66 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) {
}
ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
ASSERT(scope()->arguments_shadow() != NULL);
// We don't want to do lazy arguments allocation for functions that
// have heap-allocated contexts, because it interferes with the
// uninitialized const tracking in the context objects.
return (scope()->num_heap_slots() > 0)
? EAGER_ARGUMENTS_ALLOCATION
: LAZY_ARGUMENTS_ALLOCATION;
}
void CodeGenerator::StoreArgumentsObject(bool initial) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ArgumentsAllocationMode mode = ArgumentsMode();
ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
Comment cmnt(masm_, "[ store arguments object");
if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
// When using lazy arguments allocation, we store the hole value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
frame_->EmitPush(ip);
} else {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address, and the
// frame pointer on the stack.
const int kReceiverDisplacement = 2 + scope()->num_parameters();
__ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
frame_->Adjust(3);
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
}
Variable* arguments = scope()->arguments()->var();
Variable* shadow = scope()->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL);
JumpTarget done;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if a function
// has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
frame_->EmitPop(r0);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
done.Branch(ne);
}
StoreToSlot(arguments->slot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
StoreToSlot(shadow->slot(), NOT_CONST_INIT);
}
void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// Special handling of identifiers as subexpressions of typeof.
VirtualFrame::SpilledScope spilled_scope(frame_);
@@ -622,7 +661,7 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
} else if (variable != NULL && variable->slot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
frame_->SpillAll();
} else {
// Anything else can be handled normally.
@@ -1466,6 +1505,188 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
}
void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
// An optimized implementation of expressions of the form
// x.apply(y, arguments).
// If the arguments object of the scope has not been allocated,
// and x.apply is Function.prototype.apply, this optimization
// just copies y and the arguments of the current function on the
// stack, as receiver and arguments, and calls x.
// In the implementation comments, we call x the applicand
// and y the receiver.
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
LoadAndSpill(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
__ mov(r2, Operand(name));
frame_->CallLoadIC(RelocInfo::CODE_TARGET);
frame_->EmitPush(r0);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
LoadAndSpill(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
// Contents of the stack at this point:
// sp[0]: arguments object of the current function or the hole.
// sp[1]: receiver
// sp[2]: applicand.apply
// sp[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
// from the stack. This also deals with cases where a local variable
// named 'arguments' has been introduced.
__ ldr(r0, MemOperand(sp, 0));
Label slow, done;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, r0);
__ b(ne, &slow);
Label build_args;
// Get rid of the arguments object probe.
frame_->Drop();
// Stack now has 3 elements on it.
// Contents of stack at this point:
// sp[0]: receiver
// sp[1]: applicand.apply
// sp[2]: applicand.
// Check that the receiver really is a JavaScript object.
__ ldr(r0, MemOperand(sp, 0));
__ BranchOnSmi(r0, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
__ b(lt, &build_args);
// Check that applicand.apply is Function.prototype.apply.
__ ldr(r0, MemOperand(sp, kPointerSize));
__ BranchOnSmi(r0, &build_args);
__ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
__ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
__ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
__ cmp(r1, Operand(apply_code));
__ b(ne, &build_args);
// Check that applicand is a function.
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ BranchOnSmi(r1, &build_args);
__ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
__ b(ne, &build_args);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ mov(r0, Operand(scope()->num_parameters()));
for (int i = 0; i < scope()->num_parameters(); i++) {
__ ldr(r2, frame_->ParameterAt(i));
__ push(r2);
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(r0, Operand(r0, LSR, kSmiTagSize));
__ mov(r3, r0);
__ cmp(r0, Operand(kArgumentsLimit));
__ b(gt, &build_args);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
// r3 is a small non-negative integer, due to the test above.
__ cmp(r3, Operand(0));
__ b(eq, &invoke);
// Compute the address of the first argument.
__ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
__ add(r2, r2, Operand(kPointerSize));
__ bind(&loop);
// Post-decrement argument address by kPointerSize on each iteration.
__ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
__ push(r4);
__ sub(r3, r3, Operand(1), SetCC);
__ b(gt, &loop);
// Invoke the function.
__ bind(&invoke);
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION);
// Drop applicand.apply and applicand from the stack, and push
// the result of the function call, but leave the spilled frame
// unchanged, with 3 elements, so it is correct when we compile the
// slow-case code.
__ add(sp, sp, Operand(2 * kPointerSize));
__ push(r0);
// Stack now has 1 element:
// sp[0]: result
__ jmp(&done);
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// applicand.apply.
__ bind(&build_args);
// Stack now has 3 elements, because we have jumped from where:
// sp[0]: receiver
// sp[1]: applicand.apply
// sp[2]: applicand.
StoreArgumentsObject(false);
// Stack and frame now have 4 elements.
__ bind(&slow);
// Generic computation of x.apply(y, args) with no special optimization.
// Flip applicand.apply and applicand on the stack, so
// applicand looks like the receiver of the applicand.apply call.
// Then process it as a normal function call.
__ ldr(r0, MemOperand(sp, 3 * kPointerSize));
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
__ str(r1, MemOperand(sp, 3 * kPointerSize));
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
frame_->CallStub(&call_function, 3);
// The function and its two arguments have been dropped.
frame_->Drop(); // Drop the receiver as well.
frame_->EmitPush(r0);
// Stack now has 1 element:
// sp[0]: result
__ bind(&done);
// Restore the context register after a call.
__ ldr(cp, frame_->Context());
}
void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(has_cc());
@@ -1771,7 +1992,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
}
#ifdef DEBUG
JumpTarget verified_true;
__ cmp(r0, Operand(cp));
__ cmp(r0, cp);
verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp");
verified_true.Bind();
@@ -2248,7 +2469,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ ldr(r0, frame_->ElementAt(0)); // load the current count
__ ldr(r1, frame_->ElementAt(1)); // load the length
__ cmp(r0, Operand(r1)); // compare to the array length
__ cmp(r0, r1); // compare to the array length
node->break_target()->Branch(hs);
__ ldr(r0, frame_->ElementAt(0));
@@ -2802,6 +3023,34 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
TypeofState state) {
LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation.
if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
// ... or if the slot isn't a non-parameter arguments slot.
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
VirtualFrame::SpilledScope spilled_scope(frame_);
// Load the loaded value from the stack into r0 but leave it on the
// stack.
__ ldr(r0, MemOperand(sp, 0));
// If the loaded value is the sentinel that indicates that we
// haven't loaded the arguments object yet, we need to do it now.
JumpTarget exit;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r0, ip);
exit.Branch(ne);
frame_->Drop();
StoreArgumentsObject(false);
exit.Bind();
}
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP) {
@@ -2940,20 +3189,13 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
__ bind(&fast);
}
// All extension objects were empty and it is safe to use a global
// load IC call.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Load the global object.
LoadGlobal();
// Setup the name register.
// Setup the name register and call load IC.
__ mov(r2, Operand(slot->var()->name()));
// Call IC stub.
if (typeof_state == INSIDE_TYPEOF) {
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
} else {
frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, 0);
}
frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT);
// Drop the global object. The result is in r0.
frame_->Drop();
}
@@ -2964,7 +3206,7 @@ void CodeGenerator::VisitSlot(Slot* node) {
int original_height = frame_->height();
#endif
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, NOT_INSIDE_TYPEOF);
LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
}
@@ -3422,21 +3664,37 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------
LoadAndSpill(property->obj()); // Receiver.
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
LoadAndSpill(args->at(i));
}
Handle<String> name = Handle<String>::cast(literal->handle());
// Set the name register and call the IC initialization code.
__ mov(r2, Operand(literal->handle()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
frame_->EmitPush(r0);
if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
name->IsEqualTo(CStrVector("apply")) &&
args->length() == 2 &&
args->at(1)->AsVariableProxy() != NULL &&
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
} else {
LoadAndSpill(property->obj()); // Receiver.
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
LoadAndSpill(args->at(i));
}
// Set the name register and call the IC initialization code.
__ mov(r2, Operand(name));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
CodeForSourcePosition(node->position());
frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
__ ldr(cp, frame_->Context());
frame_->EmitPush(r0);
}
} else {
// -------------------------------------------
@@ -3974,19 +4232,49 @@ void CodeGenerator::GenerateRandomHeapNumber(
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
__ AllocateHeapNumber(r0, r1, r2, &slow_allocate_heapnumber);
__ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
// To allocate a heap number, and ensure that it is not a smi, we
// call the runtime function FUnaryMinus on 0, returning the double
// -0.0. A new, distinct heap number is returned each time.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ CallRuntime(Runtime::kNumberUnaryMinus, 1);
__ mov(r4, Operand(r0));
__ bind(&heapnumber_allocated);
__ PrepareCallCFunction(1, r1);
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(), 1);
frame_->EmitPush(r0);
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
if (CpuFeatures::IsSupported(VFP3)) {
__ PrepareCallCFunction(0, r1);
__ CallCFunction(ExternalReference::random_uint32_function(), 0);
CpuFeatures::Scope scope(VFP3);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand(0));
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
__ sub(r0, r4, Operand(kHeapObjectTag));
__ vstr(d7, r0, HeapNumber::kValueOffset);
frame_->EmitPush(r4);
} else {
__ mov(r0, Operand(r4));
__ PrepareCallCFunction(1, r1);
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(), 1);
frame_->EmitPush(r0);
}
}
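The VFP path manufactures a uniform double in [0, 1) directly from 32 random bits: 0x41300000 in the high word gives 1.0 x 2^20 with a zero upper mantissa, the random bits fill the low mantissa word, and subtracting 1.0 x 2^20 leaves random/2^32. The same bit-pattern arithmetic checked in portable C++ (a sketch of the trick only; assumes IEEE 754 doubles):

```
#include <cassert>
#include <cstdint>
#include <cstring>

// Reinterpret a 64-bit pattern as a double (memcpy sidesteps aliasing rules).
double BitsToDouble(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

// 0x4130000000000000 is 1.0 x 2^20. With 32 random bits in the low mantissa
// word the value is 2^20 + random/2^32, so the subtraction leaves a uniform
// double in [0, 1).
double RandomBitsToDouble(uint32_t random_bits) {
  const uint64_t kBase = static_cast<uint64_t>(0x41300000u) << 32;
  return BitsToDouble(kBase | random_bits) - BitsToDouble(kBase);
}

int main() {
  assert(RandomBitsToDouble(0x00000000u) == 0.0);
  assert(RandomBitsToDouble(0x80000000u) == 0.5);
  assert(RandomBitsToDouble(0xFFFFFFFFu) < 1.0);
  return 0;
}
```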
@@ -4172,18 +4460,20 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
frame_->EmitPush(r0);
return;
}
Handle<FixedArray> cache_obj(
FixedArray::cast(jsfunction_result_caches->get(cache_id)));
Load(args->at(1));
frame_->EmitPop(r2);
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
__ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ mov(r1, Operand(cache_obj));
__ ldr(r0, FieldMemOperand(r1, kFingerOffset));
// r0 now holds finger offset as a smi.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4255,7 +4545,7 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
LoadAndSpill(args->at(1));
frame_->EmitPop(r0);
frame_->EmitPop(r1);
__ cmp(r0, Operand(r1));
__ cmp(r0, r1);
cc_reg_ = eq;
}
@@ -4935,6 +5225,97 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void BeforeGenerate();
virtual void Generate();
virtual void AfterGenerate();
private:
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::BeforeGenerate() {
__ StartBlockConstPool();
}
void DeferredReferenceGetNamedValue::Generate() {
__ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
// Setup the name register and call load IC.
__ mov(r2, Operand(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a nop(1) instruction to indicate that the
// in-object load has been inlined.
__ nop(NAMED_PROPERTY_LOAD_INLINED);
}
void DeferredReferenceGetNamedValue::AfterGenerate() {
__ EndBlockConstPool();
}
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
// Setup the name register and call load IC.
__ mov(r2, Operand(name));
frame_->CallLoadIC(is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
} else {
// Inline the inobject property case.
Comment cmnt(masm(), "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(name);
// The following instructions are the inlined load of an in-object property.
// Parts of this code are patched, so the exact instructions generated need
// to be fixed. Therefore the constant pool is blocked while generating
// this code.
#ifdef DEBUG
int kInlinedNamedLoadInstructions = 8;
Label check_inlined_codesize;
masm_->bind(&check_inlined_codesize);
#endif
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Load the receiver from the stack.
__ ldr(r1, MemOperand(sp, 0));
// Check that the receiver is a heap object.
__ tst(r1, Operand(kSmiTagMask));
deferred->Branch(eq);
// Check the map. The null map used below is patched by the inline cache
// code.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ mov(r3, Operand(Factory::null_value()));
__ cmp(r2, r3);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
__ ldr(r0, MemOperand(r1, 0));
}
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
masm_->InstructionsGeneratedSince(&check_inlined_codesize));
__ IncrementCounter(&Counters::named_load_inline, 1, r1, r2);
deferred->BindExit();
}
}
void CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -4986,24 +5367,16 @@ void Reference::GetValue() {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
Variable* var = expression_->AsVariableProxy()->AsVariable();
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Setup the name register.
__ mov(r2, Operand(name));
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->EmitPush(r0);
break;
}
@@ -5400,7 +5773,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = r5;
__ cmp(r0, Operand(r1));
__ cmp(r0, r1);
__ b(ne, &not_identical);
// The two objects are identical. If we know that one of them isn't NaN then
@@ -5429,7 +5802,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ cmp(r4, Operand(ODDBALL_TYPE));
__ b(ne, &return_equal);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ cmp(r0, Operand(r2));
__ cmp(r0, r2);
__ b(ne, &return_equal);
if (cc == le) {
// undefined <= undefined should fail.
@@ -5992,8 +6365,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin) {
Label slow, slow_pop_2_first, do_the_call;
Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
Label slow, slow_reverse, do_the_call;
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
@@ -6002,7 +6374,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
__ AllocateHeapNumber(r5, r6, r7, &slow);
__ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@@ -6032,11 +6404,15 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
// After this point we have the left hand side in r1 and the right hand side
// in r0.
if (lhs.is(r0)) {
__ Swap(r0, r1, ip);
}
if (ShouldGenerateFPCode()) {
Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
switch (op_) {
case Token::ADD:
@@ -6054,7 +6430,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
__ AllocateHeapNumber(r5, r6, r7, &slow);
__ AllocateHeapNumber(r5, r6, r7, &slow);
}
// Move r0 to a double in r2-r3.
@@ -6097,11 +6473,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ pop(lr);
}
// HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
// r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
Label r1_is_not_smi;
if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm);
__ jmp(&r1_is_smi);
}
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
@@ -6194,6 +6581,14 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
__ pop(pc);
}
}
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
__ Swap(r0, r1, ip);
}
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
@@ -7743,7 +8138,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ cmp(r0, Operand(Factory::fixed_array_map()));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r0, ip);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.

37
deps/v8/src/arm/codegen-arm.h

@@ -143,6 +143,24 @@ class CodeGenState BASE_EMBEDDED {
};
// -------------------------------------------------------------------------
// Arguments allocation mode
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
EAGER_ARGUMENTS_ALLOCATION,
LAZY_ARGUMENTS_ALLOCATION
};
// Different nop operations are used by the code generator to detect certain
// states of the generated code.
enum NopMarkerTypes {
NON_MARKING_NOP = 0,
NAMED_PROPERTY_LOAD_INLINED
};
// -------------------------------------------------------------------------
// CodeGenerator
@@ -241,6 +259,12 @@ class CodeGenerator: public AstVisitor {
// Main code generation function
void Generate(CompilationInfo* info);
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
// Store the arguments object and allocate it if necessary.
void StoreArgumentsObject(bool initial);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
@@ -284,9 +308,14 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
// Load a named property, leaving it in r0. The receiver is passed on the
// stack, and remains there.
void EmitNamedLoad(Handle<String> name, bool is_contextual);
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad(bool is_global);
@@ -335,6 +364,14 @@ class CodeGenerator: public AstVisitor {
CallFunctionFlags flags,
int position);
// An optimized implementation of expressions of the form
// x.apply(y, arguments). We call x the applicand and y the receiver.
// The optimization avoids allocating an arguments object if possible.
void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
// Control flow
void Branch(bool if_true, JumpTarget* target);
void CheckStack();

10
deps/v8/src/arm/cpu-arm.cc

@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// CPU specific code for arm independent of OS goes here.
#if defined(__arm__)
#ifdef __arm__
#include <sys/syscall.h> // for cache flushing.
#endif
@@ -35,6 +35,10 @@
#include "cpu.h"
#include "macro-assembler.h"
#ifndef __arm__
#include "simulator-arm.h" // for cache flushing.
#endif
namespace v8 {
namespace internal {
@@ -46,9 +50,11 @@ void CPU::Setup() {
void CPU::FlushICache(void* start, size_t size) {
#if !defined (__arm__)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. No I$ flushes are necessary.
// building an ARM emulator based target. We should notify the simulator
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
assembler::arm::Simulator::FlushICache(start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,

5
deps/v8/src/arm/disasm-arm.cc

@@ -34,10 +34,9 @@
// NameConverter converter;
// Disassembler d(converter);
// for (byte* pc = begin; pc < end;) {
// char buffer[128];
// buffer[0] = '\0';
// v8::internal::EmbeddedVector<char, 256> buffer;
// byte* prev_pc = pc;
// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }

57
deps/v8/src/arm/full-codegen-arm.cc

@@ -194,36 +194,34 @@ void FullCodeGenerator::EmitReturnSequence(int position) {
__ CallRuntime(Runtime::kTraceExit, 1);
}
#ifdef DEBUG
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
// Calculate the exact length of the return sequence and make sure that
// the constant pool is not emitted inside of the return sequence.
int num_parameters = scope()->num_parameters();
int32_t sp_delta = (num_parameters + 1) * kPointerSize;
int return_sequence_length = Assembler::kJSReturnSequenceLength;
if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
// Additional mov instruction generated.
return_sequence_length++;
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Here we use masm_-> instead of the __ macro to keep the code coverage
// tool from instrumenting, as we rely on the code size here.
int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
}
masm_->BlockConstPoolFor(return_sequence_length);
CodeGenerator::RecordPositions(masm_, position);
__ RecordJSReturn();
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
__ add(sp, sp, Operand(sp_delta));
__ Jump(lr);
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. The add instruction above is an addressing
// mode 1 instruction where there are restrictions on which immediate values
// can be encoded in the instruction and which immediate values requires
// use of an additional instruction for moving the immediate to a temporary
// register.
ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
// expected by the debugger. If the sp_delta above cannot be encoded in the
// add instruction, the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
#endif
}
}
@@ -1594,10 +1592,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
int count_value = expr->op() == Token::INC ? 1 : -1;
if (loop_depth() > 0) {
__ add(r0, r0, Operand(expr->op() == Token::INC
? Smi::FromInt(1)
: Smi::FromInt(-1)));
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
@@ -1605,11 +1602,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ b(eq, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(r1));
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
__ mov(r1, Operand(expr->op() == Token::INC
? Smi::FromInt(1)
: Smi::FromInt(-1)));
__ mov(r1, Operand(Smi::FromInt(count_value)));
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
__ CallStub(&stub);
__ bind(&done);
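The rewritten increment adds the tagged smi with SetCC and branches on the overflow flag, so a count that leaves the 31-bit smi range falls back to the stub. Roughly the same check in C++, assuming one-bit smi tagging and the GCC/Clang __builtin_add_overflow builtin:

```
#include <cassert>
#include <cstdint>

// A smi stores a 31-bit payload shifted left by one; the low tag bit is 0.
int32_t SmiFromInt(int32_t value) { return value << 1; }

// Adding two tagged smis overflows exactly when the plain 32-bit add sets
// the overflow flag -- the condition the generated `add ..., SetCC` plus
// `b vs` tests.
bool SmiAddOverflows(int32_t a, int32_t b, int32_t* result) {
  return __builtin_add_overflow(a, b, result);
}

int main() {
  int32_t result;
  assert(!SmiAddOverflows(SmiFromInt(41), SmiFromInt(1), &result));
  assert(result == SmiFromInt(42));
  // The largest smi payload is 2^30 - 1; incrementing it must hit the stub.
  assert(SmiAddOverflows(SmiFromInt((1 << 30) - 1), SmiFromInt(1), &result));
  return 0;
}
```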

67
deps/v8/src/arm/ic-arm.cc

@@ -27,7 +27,9 @@
#include "v8.h"
#include "assembler-arm.h"
#include "codegen-inl.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -561,21 +563,72 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
// TODO(181): Implement map patching once loop nesting is tracked on the
// ARM platform so we can generate inlined fast-case code loads in
// loops.
void LoadIC::ClearInlinedVersion(Address address) {}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if present) to
// guarantee failure by holding an invalid map (the null value). The offset
// can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), 0);
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return false;
// If the instruction after the call site is not the pseudo instruction nop1
// then this is not related to an inlined in-object property load. The nop1
// instruction is located just after the call to the IC in the deferred code
// handling the miss in the inlined code. After the nop1 instruction there is
// a branch instruction for jumping back from the deferred code.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
if (!Assembler::IsNop(instr_after_call, NAMED_PROPERTY_LOAD_INLINED)) {
return false;
}
ASSERT_EQ(0, RegisterAllocator::kNumRegisters);
Address address_after_nop1 = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop1 = Assembler::instr_at(address_after_nop1);
ASSERT(Assembler::IsBranch(instr_after_nop1));
// Find the end of the inlined code for handling the load.
int b_offset =
Assembler::GetBranchOffset(instr_after_nop1) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
Address inline_end_address = address_after_nop1 + b_offset;
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
// The immediate must be representable in 12 bits.
ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
Address ldr_property_instr_address = inline_end_address - 4;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
ldr_property_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
// Indicate that code has changed.
CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
// Patch the map check.
Address ldr_map_instr_address = inline_end_address - 16;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
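The patcher recognizes an inlined site by the nop(type) marker after the IC call, and that marker is nothing more than mov r<type>, r<type>. A quick sanity check of the encoding IsNop matches (condition and bit values assumed from the ARM encoding):

```
#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

const Instr kAl = 0xEu << 28;  // "always" condition.
const Instr B21 = 1u << 21;
const Instr B12 = 1u << 12;

// nop(type) encodes as "mov r<type>, r<type>": opcode 13 (mov) with the same
// register as source and destination, the register number being the type.
Instr EncodeNop(int type) { return kAl | 13 * B21 | type * B12 | type; }

bool IsNop(Instr instr, int type) { return instr == EncodeNop(type); }

int main() {
  assert(EncodeNop(1) == 0xE1A01001);  // mov r1, r1: the inlined-load marker.
  assert(IsNop(0xE1A01001, 1));        // Marker present: site is patchable.
  assert(!IsNop(0xE1A00000, 1));       // Plain nop (mov r0, r0): not marked.
  return 0;
}
```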
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return false;
}
@@ -656,8 +709,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
__ cmp(r0, Operand(r3));
__ b(ge, &slow);
__ cmp(r0, r3);
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));

21
deps/v8/src/arm/macro-assembler-arm.cc

@@ -117,18 +117,19 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
// ldr ip, [pc, #...]
// blx ip
// The two instructions (ldr and blx) could be separated by a literal
// The two instructions (ldr and blx) could be separated by a constant
// pool and the code would still work. The issue comes from the
// patching code which expects the ldr to be just above the blx.
BlockConstPoolFor(2);
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
// positions when pc is the target, since this is not the case here
// we have to do it explicitly.
WriteRecordedPositions();
mov(ip, Operand(target, rmode), LeaveCC, cond);
blx(ip, cond);
{ BlockConstPoolScope block_const_pool(this);
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
// positions when pc is the target, since this is not the case here
// we have to do it explicitly.
WriteRecordedPositions();
mov(ip, Operand(target, rmode), LeaveCC, cond);
blx(ip, cond);
}
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else

101
deps/v8/src/arm/simulator-arm.cc

@@ -474,6 +474,96 @@ void Debugger::Debug() {
}
static bool ICacheMatch(void* one, void* two) {
ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
return one == two;
}
static uint32_t ICacheHash(void* key) {
return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
}
static bool AllOnOnePage(uintptr_t start, int size) {
intptr_t start_page = (start & ~CachePage::kPageMask);
intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
return start_page == end_page;
}
void Simulator::FlushICache(void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
size += intra_line;
size = ((size - 1) | CachePage::kLineMask) + 1;
int offset = (start & CachePage::kPageMask);
while (!AllOnOnePage(start, size - 1)) {
int bytes_to_flush = CachePage::kPageSize - offset;
FlushOnePage(start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
ASSERT_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
FlushOnePage(start, size);
}
}
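FlushICache first widens the requested range to whole cache lines; `((size - 1) | kLineMask) + 1` is the usual round-up-to-a-multiple idiom for power-of-two line lengths. For example:

```
#include <cassert>

// Round size up to a multiple of the line length; mask = length - 1,
// and the length must be a power of two (4 bytes in CachePage).
int RoundUpToLine(int size, int line_mask) {
  return ((size - 1) | line_mask) + 1;
}

int main() {
  const int kLineMask = 3;  // 4-byte cache lines.
  assert(RoundUpToLine(1, kLineMask) == 4);
  assert(RoundUpToLine(4, kLineMask) == 4);
  assert(RoundUpToLine(5, kLineMask) == 8);
  return 0;
}
```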
CachePage* Simulator::GetCachePage(void* page) {
v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page,
ICacheHash(page),
true);
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
return reinterpret_cast<CachePage*>(entry->value);
}
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(intptr_t start, int size) {
ASSERT(size <= CachePage::kPageSize);
ASSERT(AllOnOnePage(start, size - 1));
ASSERT((start & CachePage::kLineMask) == 0);
ASSERT((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(page);
char* valid_bytemap = cache_page->ValidityByte(offset);
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
void Simulator::CheckICache(Instr* instr) {
#ifdef DEBUG
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
int offset = (address & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(page);
char* cache_valid_byte = cache_page->ValidityByte(offset);
bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
if (cache_hit) {
// Check that the data in memory matches the contents of the I-cache.
CHECK(memcmp(reinterpret_cast<void*>(instr),
cache_page->CachedData(offset),
Instr::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
#endif
}
// Create one simulator per thread and keep it in thread local storage.
static v8::internal::Thread::LocalStorageKey simulator_key;
@@ -489,7 +579,13 @@ void Simulator::Initialize() {
}
v8::internal::HashMap* Simulator::i_cache_ = NULL;
Simulator::Simulator() {
if (i_cache_ == NULL) {
i_cache_ = new v8::internal::HashMap(&ICacheMatch);
}
Initialize();
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
@@ -554,6 +650,9 @@ class Redirection {
swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
fp_return_(fp_return),
next_(list_) {
Simulator::current()->
FlushICache(reinterpret_cast<void*>(&swi_instruction_),
Instr::kInstrSize);
list_ = this;
}
@ -2342,6 +2441,7 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
CheckICache(instr);
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
@ -2536,7 +2636,6 @@ uintptr_t Simulator::PopAddress() {
return address;
}
} } // namespace assembler::arm
#endif // __arm__

43
deps/v8/src/arm/simulator-arm.h

@ -89,11 +89,43 @@ class SimulatorStack : public v8::internal::AllStatic {
#include "constants-arm.h"
#include "hashmap.h"
namespace assembler {
namespace arm {
class CachePage {
public:
static const int LINE_VALID = 0;
static const int LINE_INVALID = 1;
static const int kPageShift = 12;
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2; // The cache line is only 4 bytes right now.
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
CachePage() {
memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
}
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
char* CachedData(int offset) {
return &data_[offset];
}
private:
char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
class Simulator {
public:
friend class Debugger;
@ -162,6 +194,9 @@ class Simulator {
// Pop an address from the JS stack.
uintptr_t PopAddress();
// ICache checking.
static void FlushICache(void* start, size_t size);
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@ -239,6 +274,11 @@ class Simulator {
// Executes one instruction.
void InstructionDecode(Instr* instr);
// ICache.
static void CheckICache(Instr* instr);
static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page);
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
bool fp_return);
@ -276,6 +316,9 @@ class Simulator {
int icount_;
static bool initialized_;
// Icache simulation
static v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;

2
deps/v8/src/arm/stub-cache-arm.cc

@ -53,7 +53,7 @@ static void ProbeTable(MacroAssembler* masm,
// Check that the key in the entry matches the name.
__ mov(ip, Operand(key_offset));
__ ldr(ip, MemOperand(ip, offset, LSL, 1));
__ cmp(name, Operand(ip));
__ cmp(name, ip);
__ b(ne, &miss);
// Get the code entry from the cache.

6
deps/v8/src/arm/virtual-frame-arm.cc

@ -299,6 +299,12 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
}
void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
CallCodeObject(ic, mode, 0);
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {

4
deps/v8/src/arm/virtual-frame-arm.h

@ -308,6 +308,10 @@ class VirtualFrame : public ZoneObject {
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver on stack and property name in r2. Result returned in
// r0.
void CallLoadIC(RelocInfo::Mode mode);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
// and depend on the type of IC stub.

10
deps/v8/src/builtins.cc

@ -348,15 +348,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// Technically in new space this write might be omitted (except for
// debug mode, which iterates through the heap), but to play it safe
// we still do it.
if (to_trim == 1) {
former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
} else if (to_trim == 2) {
former_start[0] = Heap::raw_unchecked_two_pointer_filler_map();
} else {
former_start[0] = Heap::raw_unchecked_byte_array_map();
ByteArray* as_byte_array = reinterpret_cast<ByteArray*>(elms);
as_byte_array->set_length(ByteArray::LengthFor(to_trim * kPointerSize));
}
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);

2
deps/v8/src/codegen.cc

@ -77,11 +77,13 @@ void CodeGenerator::ProcessDeferred() {
}
// Generate the code.
Comment cmnt(masm_, code->comment());
code->BeforeGenerate();
masm_->bind(code->entry_label());
code->SaveRegisters();
code->Generate();
code->RestoreRegisters();
masm_->jmp(code->exit_label());
code->AfterGenerate();
}
}

3
deps/v8/src/codegen.h

@ -212,6 +212,9 @@ class DeferredCode: public ZoneObject {
void SaveRegisters();
void RestoreRegisters();
virtual void BeforeGenerate() { }
virtual void AfterGenerate() { }
protected:
MacroAssembler* masm_;

21
deps/v8/src/globals.h

@ -50,15 +50,32 @@ namespace internal {
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error Your host architecture was not detected as supported by v8
#error Host architecture was not detected as supported by v8
#endif
// Check for supported combinations of host and target architectures.
#if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
#error Target architecture ia32 is only supported on ia32 host
#endif
#if defined(V8_TARGET_ARCH_X64) && !defined(V8_HOST_ARCH_X64)
#error Target architecture x64 is only supported on x64 host
#endif
#if (defined(V8_TARGET_ARCH_ARM) && \
!(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM)))
#error Target architecture arm is only supported on arm and ia32 host
#endif
#if (defined(V8_TARGET_ARCH_MIPS) && \
!(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS)))
#error Target architecture mips is only supported on mips and ia32 host
#endif
// Define unaligned read for the target architectures supporting it.
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
#define V8_TARGET_CAN_READ_UNALIGNED 1
#elif V8_TARGET_ARCH_ARM
#elif V8_TARGET_ARCH_MIPS
#else
#error Your target architecture is not supported by v8
#error Target architecture is not supported by v8
#endif
// Support for alternative bool type. This is only enabled if the code is

6
deps/v8/src/heap.cc

@ -2188,9 +2188,11 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map(Heap::one_pointer_filler_map());
filler->set_map(one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
filler->set_map(two_pointer_filler_map());
} else {
filler->set_map(Heap::byte_array_map());
filler->set_map(byte_array_map());
ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
}
}

57
deps/v8/src/ia32/codegen-ia32.cc

@ -154,8 +154,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
#endif
// New scope to get automatic timing calculation.
{ // NOLINT
HistogramTimerScope codegen_timer(&Counters::code_generation);
{ HistogramTimerScope codegen_timer(&Counters::code_generation);
CodeGenState state(this);
// Entry:
@ -1181,16 +1180,23 @@ static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
case Token::SAR:
if (left.is_smi()) return TypeInfo::Smi();
// Result is a smi if we shift by a constant >= 1, otherwise an integer32.
// Shift amount is masked with 0x1F (ECMA standard 11.7.2).
return (right.is_constant() && right.handle()->IsSmi()
&& Smi::cast(*right.handle())->value() >= 1)
&& (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
? TypeInfo::Smi()
: TypeInfo::Integer32();
case Token::SHR:
// Result is a smi if we shift by a constant >= 2, otherwise an integer32.
return (right.is_constant() && right.handle()->IsSmi()
&& Smi::cast(*right.handle())->value() >= 2)
? TypeInfo::Smi()
: TypeInfo::Integer32();
// Result is a smi if we shift by a constant >= 2, an integer32 if
// we shift by 1, and an unsigned 32-bit integer if we shift by 0.
if (right.is_constant() && right.handle()->IsSmi()) {
int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
if (shift_amount > 1) {
return TypeInfo::Smi();
} else if (shift_amount > 0) {
return TypeInfo::Integer32();
}
}
return TypeInfo::Number();
case Token::ADD:
if (operands_type.IsSmi()) {
// The Integer32 range is big enough to take the sum of any two Smis.
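A note on the shift typing above: the shift count is masked with 0x1F (per the ECMA-262 shift operators), and an unsigned right shift by 0 or 1 can produce values outside the smi (31-bit) or int32 ranges, which is why only shifts by 2 or more are guaranteed to yield a smi. A small sketch of the cutoffs using plain unsigned 32-bit arithmetic (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t x = 0xFFFFFFFFu;  // the JS value -1 as seen by x >>> n
  printf("%u\n", x >> 0);    // 4294967295: exceeds int32, needs a heap number
  printf("%u\n", x >> 1);    // 2147483647: fits int32 but not a 31-bit smi
  printf("%u\n", x >> 2);    // 1073741823: fits a smi
  printf("%u\n", x >> (33 & 0x1F));  // shift counts are masked: same as x >> 1
  return 0;
}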
@ -2773,11 +2779,7 @@ void CodeGenerator::Comparison(AstNode* node,
// number comparison in the stub if it was inlined.
CompareStub stub(cc, strict, nan_info, !inline_number_compare);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
if (cc == equal) {
__ test(answer.reg(), Operand(answer.reg()));
} else {
__ cmp(answer.reg(), 0);
}
__ test(answer.reg(), Operand(answer.reg()));
answer.Unuse();
dest->Split(cc);
} else {
@ -6551,17 +6553,23 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
frame_->Push(Factory::undefined_value());
return;
}
Handle<FixedArray> cache_obj(
FixedArray::cast(jsfunction_result_caches->get(cache_id)));
Load(args->at(1));
Result key = frame_->Pop();
key.ToRegister();
Result cache = allocator()->Allocate();
__ mov(cache.reg(), cache_obj);
ASSERT(cache.is_valid());
__ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
__ mov(cache.reg(),
FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
__ mov(cache.reg(),
ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ mov(cache.reg(),
FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
Result tmp = allocator()->Allocate();
ASSERT(tmp.is_valid());
DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
cache.reg(),
@ -7045,6 +7053,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
switch (op) {
case Token::NOT:
case Token::DELETE:
case Token::TYPEOF:
UNREACHABLE(); // handled above
break;
case Token::SUB: {
GenericUnaryOpStub stub(Token::SUB, overwrite);
Result operand = frame_->Pop();
@ -7085,11 +7099,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ not_(answer.reg());
continue_label.Bind(&answer);
if (operand_info.IsInteger32()) {
answer.set_type_info(TypeInfo::Integer32());
} else {
answer.set_type_info(TypeInfo::Number());
}
answer.set_type_info(TypeInfo::Integer32());
frame_->Push(&answer);
}
break;
@ -7119,8 +7129,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
}
default:
// NOT, DELETE, TYPEOF, and VOID are handled outside the
// switch.
UNREACHABLE();
}
}
@ -9814,8 +9822,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line.
TranscendentalCache::Element test_elem[2];
{ TranscendentalCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));

587
deps/v8/src/liveedit-debugger.js

@ -28,26 +28,52 @@
// LiveEdit feature implementation. The script should be executed after
// debug-debugger.js.
// A LiveEdit namespace is declared inside a single function constructor.
// A LiveEdit namespace. It contains functions that modify JavaScript code
// according to changes in the script source (if possible).
//
// When new script source is put in, the difference is calculated textually,
// in the form of a list of delete/add/change chunks. The functions that
// include change chunk(s) get recompiled, or their enclosing functions are
// recompiled instead.
// If a function cannot be recompiled (e.g. it was completely erased in the
// new version of the script) it remains unchanged, but the code that could
// create a new instance of this function goes away. An old version of the
// script is created to back up this obsolete function.
// All unchanged functions have their positions updated accordingly.
//
// LiveEdit namespace is declared inside a single function constructor.
Debug.LiveEdit = new function() {
// Changes script text and recompiles all relevant functions if possible.
// Applies the change to the script.
// The change is always a substring (change_pos, change_pos + change_len)
// being replaced with a completely different string new_str.
//
// Only one function will have its Code changed as a result of this function.
// All nested functions (should they have any instances at the moment) are
// left unchanged and re-linked to a newly created script instance
// representing the old version of the source. (Generally speaking,
// during the change all nested functions are erased and a completely
// different set of nested functions is introduced.) All other functions
// just have their positions updated.
// This API is legacy and is obsolete.
//
// @param {Script} script that is being changed
// @param {Array} change_log a list that collects engineer-readable
// description of what happened.
function ApplyPatch(script, change_pos, change_len, new_str,
change_log) {
var old_source = script.source;
// Prepare new source string.
var new_source = old_source.substring(0, change_pos) +
new_str + old_source.substring(change_pos + change_len);
return ApplyPatchMultiChunk(script,
[ change_pos, change_pos + change_len, change_pos + new_str.length],
new_source, change_log);
}
// Function is public.
this.ApplyPatch = ApplyPatch;
// Forward declaration for minifier.
var FunctionStatus;
// Applies the change to the script.
// The change is in the form of a list of chunks encoded in a single array
// as a series of triplets (pos1_start, pos1_end, pos2_end).
function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
// Fully compiles the source string as a script. Returns an Array of
// FunctionCompileInfo -- descriptions of all functions of the script.
@ -117,27 +143,6 @@ Debug.LiveEdit = new function() {
return compile_info;
}
// Given a position, finds a function that fully includes the entire
// change.
function FindChangedFunction(compile_info, offset, len) {
// First condition: function should start before the change region.
// Function #0 (whole-script function) always does, but we want
// one, that is later in this list.
var index = 0;
while (index + 1 < compile_info.length &&
compile_info[index + 1].start_position <= offset) {
index++;
}
// Now we are at the last function that begins before the change
// region. The function that covers entire change region is either
// this function or the enclosing one.
for (; compile_info[index].end_position < offset + len;
index = compile_info[index].outer_index) {
Assert(index != -1);
}
return index;
}
// Variable forward declarations. Preprocessor "Minifier" needs them.
var old_compile_info;
var shared_infos;
@ -156,34 +161,27 @@ Debug.LiveEdit = new function() {
// Replaces function's Code.
function PatchCode(new_info, shared_info) {
%LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
change_log.push( {function_patched: new_info.function_name} );
}
var change_len_old;
var change_len_new;
// Translate position in old version of script into position in new
// version of script.
function PosTranslator(old_pos) {
if (old_pos <= change_pos) {
return old_pos;
}
if (old_pos >= change_pos + change_len_old) {
return old_pos + change_len_new - change_len_old;
if (shared_info) {
%LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array);
change_log.push( {function_patched: new_info.function_name} );
} else {
change_log.push( {function_patched: new_info.function_name,
function_info_not_found: true} );
}
return -1;
}
var position_change_array;
var position_patch_report;
function PatchPositions(new_info, shared_info) {
function PatchPositions(old_info, shared_info) {
if (!shared_info) {
// TODO(LiveEdit): explain what is happening.
// TODO(LiveEdit): function is not compiled yet or is already collected.
position_patch_report.push(
{ name: old_info.function_name, info_not_found: true } );
return;
}
var breakpoint_position_update = %LiveEditPatchFunctionPositions(
shared_info.raw_array, position_change_array);
shared_info.raw_array, diff_array);
for (var i = 0; i < breakpoint_position_update.length; i += 2) {
var new_pos = breakpoint_position_update[i];
var break_point_object = breakpoint_position_update[i + 1];
@ -191,7 +189,7 @@ Debug.LiveEdit = new function() {
{ from: break_point_object.source_position(), to: new_pos } } );
break_point_object.updateSourcePosition(new_pos, script);
}
position_patch_report.push( { name: new_info.function_name } );
position_patch_report.push( { name: old_info.function_name } );
}
var link_to_old_script_report;
@ -199,22 +197,19 @@ Debug.LiveEdit = new function() {
// Makes a function associated with another instance of a script (the
// one representing its old version). This way the function still
// may access its own text.
function LinkToOldScript(shared_info) {
%LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
link_to_old_script_report.push( { name: shared_info.function_name } );
function LinkToOldScript(shared_info, old_info_node) {
if (shared_info) {
%LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script);
link_to_old_script_report.push( { name: shared_info.function_name } );
} else {
link_to_old_script_report.push(
{ name: old_info_node.info.function_name, not_found: true } );
}
}
var old_source = script.source;
var change_len_old = change_len;
var change_len_new = new_str.length;
// Prepare new source string.
var new_source = old_source.substring(0, change_pos) +
new_str + old_source.substring(change_pos + change_len);
// Find all SharedFunctionInfo's that are compiled from this script.
var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script);
@ -234,94 +229,103 @@ Debug.LiveEdit = new function() {
} catch (e) {
throw new Failure("Failed to compile new version of script: " + e);
}
// An index of a single function, that is going to have its code replaced.
var function_being_patched =
FindChangedFunction(old_compile_info, change_pos, change_len_old);
// In old and new script versions function with a change should have the
// same indexes.
var function_being_patched2 =
FindChangedFunction(new_compile_info, change_pos, change_len_new);
Assert(function_being_patched == function_being_patched2,
"inconsistent old/new compile info");
// Check that function being patched has the same expectations in a new
// version. Otherwise we cannot safely patch its behavior and should
// choose the outer function instead.
while (!CompareFunctionExpectations(
old_compile_info[function_being_patched],
new_compile_info[function_being_patched])) {
Assert(old_compile_info[function_being_patched].outer_index ==
new_compile_info[function_being_patched].outer_index);
function_being_patched =
old_compile_info[function_being_patched].outer_index;
Assert(function_being_patched != -1);
var pos_translator = new PosTranslator(diff_array);
// Build tree structures for old and new versions of the script.
var root_old_node = BuildCodeInfoTree(old_compile_info);
var root_new_node = BuildCodeInfoTree(new_compile_info);
// Analyze changes.
MarkChangedFunctions(root_old_node, pos_translator.GetChunks());
FindCorrespondingFunctions(root_old_node, root_new_node);
// Prepare to-do lists.
var replace_code_list = new Array();
var link_to_old_script_list = new Array();
var update_positions_list = new Array();
function HarvestTodo(old_node) {
function CollectDamaged(node) {
link_to_old_script_list.push(node);
for (var i = 0; i < node.children.length; i++) {
CollectDamaged(node.children[i]);
}
}
if (old_node.status == FunctionStatus.DAMAGED) {
CollectDamaged(old_node);
return;
}
if (old_node.status == FunctionStatus.UNCHANGED) {
update_positions_list.push(old_node);
} else if (old_node.status == FunctionStatus.SOURCE_CHANGED) {
update_positions_list.push(old_node);
} else if (old_node.status == FunctionStatus.CHANGED) {
replace_code_list.push(old_node);
}
for (var i = 0; i < old_node.children.length; i++) {
HarvestTodo(old_node.children[i]);
}
}
HarvestTodo(root_old_node);
// Collect shared infos for functions whose code needs to be patched.
var replaced_function_infos = new Array();
for (var i = 0; i < replace_code_list.length; i++) {
var info = FindFunctionInfo(replace_code_list[i].array_index);
if (info) {
replaced_function_infos.push(info);
}
}
// Check that function being patched is not currently on stack.
CheckStackActivations(
[ FindFunctionInfo(function_being_patched) ], change_log );
CheckStackActivations(replaced_function_infos, change_log);
// We haven't changed anything before this line yet.
// Committing all changes.
var old_script_name = CreateNameForOldScript(script);
// Update the script text and create a new script representing an old
// version of the script.
var old_script = %LiveEditReplaceScript(script, new_source,
old_script_name);
PatchCode(new_compile_info[function_being_patched],
FindFunctionInfo(function_being_patched));
var position_patch_report = new Array();
change_log.push( {position_patched: position_patch_report} );
var position_change_array = [ change_pos,
change_pos + change_len_old,
change_pos + change_len_new ];
// Update positions of all outer functions (i.e. all functions, that
// are partially below the function being patched).
for (var i = new_compile_info[function_being_patched].outer_index;
i != -1;
i = new_compile_info[i].outer_index) {
PatchPositions(new_compile_info[i], FindFunctionInfo(i));
}
// Update positions of all functions that are fully below the function
// being patched.
var old_next_sibling =
old_compile_info[function_being_patched].next_sibling_index;
var new_next_sibling =
new_compile_info[function_being_patched].next_sibling_index;
// We simply go over the tail of both old and new lists. Their tails should
// have an identical structure.
if (old_next_sibling == -1) {
Assert(new_next_sibling == -1);
} else {
Assert(old_compile_info.length - old_next_sibling ==
new_compile_info.length - new_next_sibling);
for (var i = old_next_sibling, j = new_next_sibling;
i < old_compile_info.length; i++, j++) {
PatchPositions(new_compile_info[j], FindFunctionInfo(i));
// Create an old script if there are functions linked to the old version.
if (link_to_old_script_list.length > 0) {
var old_script_name = CreateNameForOldScript(script);
// Update the script text and create a new script representing an old
// version of the script.
var old_script = %LiveEditReplaceScript(script, new_source,
old_script_name);
var link_to_old_script_report = new Array();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
// We need to link all former nested functions to the old script.
for (var i = 0; i < link_to_old_script_list.length; i++) {
LinkToOldScript(
FindFunctionInfo(link_to_old_script_list[i].array_index),
link_to_old_script_list[i]);
}
}
for (var i = 0; i < replace_code_list.length; i++) {
PatchCode(replace_code_list[i].corresponding_node.info,
FindFunctionInfo(replace_code_list[i].array_index));
}
var link_to_old_script_report = new Array();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
// We need to link to old script all former nested functions.
for (var i = function_being_patched + 1; i < old_next_sibling; i++) {
LinkToOldScript(FindFunctionInfo(i), old_script);
var position_patch_report = new Array();
change_log.push( {position_patched: position_patch_report} );
for (var i = 0; i < update_positions_list.length; i++) {
// TODO(LiveEdit): take into account whether it's source_changed or
// unchanged and whether positions changed at all.
PatchPositions(update_positions_list[i].info,
FindFunctionInfo(update_positions_list[i].array_index));
}
}
// Function is public.
this.ApplyPatch = ApplyPatch;
this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
function Assert(condition, message) {
if (!condition) {
@ -332,6 +336,296 @@ Debug.LiveEdit = new function() {
}
}
}
function DiffChunk(pos1, pos2, len1, len2) {
this.pos1 = pos1;
this.pos2 = pos2;
this.len1 = len1;
this.len2 = len2;
}
function PosTranslator(diff_array) {
var chunks = new Array();
var pos1 = 0;
var pos2 = 0;
for (var i = 0; i < diff_array.length; i += 3) {
pos2 += diff_array[i] - pos1;  // advance over the unchanged run
pos1 = diff_array[i];
chunks.push(new DiffChunk(pos1, pos2, diff_array[i + 1] - pos1,
diff_array[i + 2] - pos2));
pos1 = diff_array[i + 1];
pos2 = diff_array[i + 2];
}
this.chunks = chunks;
}
PosTranslator.prototype.GetChunks = function() {
return this.chunks;
}
PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
var array = this.chunks;
if (array.length == 0 || pos < array[0].pos1) {
return pos;
}
var chunk_index1 = 0;
var chunk_index2 = array.length - 1;
while (chunk_index1 < chunk_index2) {
var middle_index = Math.floor((chunk_index1 + chunk_index2) / 2);
if (pos < array[middle_index + 1].pos1) {
chunk_index2 = middle_index;
} else {
chunk_index1 = middle_index + 1;
}
}
var chunk = array[chunk_index1];
if (pos >= chunk.pos1 + chunk.len1) {
return pos + chunk.pos2 + chunk.len2 - chunk.pos1 - chunk.len1;
}
if (!inside_chunk_handler) {
inside_chunk_handler = PosTranslator.default_inside_chunk_handler;
}
inside_chunk_handler(pos, chunk);
}
PosTranslator.default_inside_chunk_handler = function() {
Assert(false, "Cannot translate position in changed area");
}
var FunctionStatus = {
// No change to function or its inner functions; however its positions
// in script may have been shifted.
UNCHANGED: "unchanged",
// The code of a function remains unchanged, but something happened inside
// some inner functions.
SOURCE_CHANGED: "source changed",
// The code of a function is changed or some nested function cannot be
// properly patched so this function must be recompiled.
CHANGED: "changed",
// Function is changed but cannot be patched.
DAMAGED: "damaged"
}
function CodeInfoTreeNode(code_info, children, array_index) {
this.info = code_info;
this.children = children;
// an index in array of compile_info
this.array_index = array_index;
this.parent = void(0);
this.status = FunctionStatus.UNCHANGED;
// Status explanation is used for debugging purposes and will be shown
// in user UI if some explanations are needed.
this.status_explanation = void(0);
this.new_start_pos = void(0);
this.new_end_pos = void(0);
this.corresponding_node = void(0);
}
// From an array of function infos that implicitly forms a tree, creates
// an actual tree of the functions in the script.
function BuildCodeInfoTree(code_info_array) {
// Throughout this function we iterate over the input array.
var index = 0;
// Recursive function that builds a branch of tree.
function BuildNode() {
var my_index = index;
index++;
var child_array = new Array();
while (index < code_info_array.length &&
code_info_array[index].outer_index == my_index) {
child_array.push(BuildNode());
}
var node = new CodeInfoTreeNode(code_info_array[my_index], child_array,
my_index);
for (var i = 0; i < child_array.length; i++) {
child_array[i].parent = node;
}
return node;
}
var root = BuildNode();
Assert(index == code_info_array.length);
return root;
}
// Applies a list of textual diff chunks to the tree of functions.
// Determines the status of each function (from unchanged to damaged).
// However, children of unchanged functions are ignored.
function MarkChangedFunctions(code_info_tree, chunks) {
// A convenient iterator over diff chunks that also translates
// positions from old to new in the current unchanged part of the script.
var chunk_it = new function() {
var chunk_index = 0;
var pos_diff = 0;
this.current = function() { return chunks[chunk_index]; }
this.next = function() {
var chunk = chunks[chunk_index];
pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
chunk_index++;
}
this.done = function() { return chunk_index >= chunks.length; }
this.TranslatePos = function(pos) { return pos + pos_diff; }
};
// A recursive function that processes internals of a function and all its
// inner functions. Iterator chunk_it initially points to a chunk that is
// below function start.
function ProcessInternals(info_node) {
info_node.new_start_pos = chunk_it.TranslatePos(
info_node.info.start_position);
var child_index = 0;
var code_changed = false;
var source_changed = false;
// Simultaneously iterates over child functions and over chunks.
while (!chunk_it.done() &&
chunk_it.current().pos1 < info_node.info.end_position) {
if (child_index < info_node.children.length) {
var child = info_node.children[child_index];
if (child.info.end_position <= chunk_it.current().pos1) {
ProcessUnchangedChild(child);
child_index++;
continue;
} else if (child.info.start_position >=
chunk_it.current().pos1 + chunk_it.current().len1) {
code_changed = true;
chunk_it.next();
continue;
} else if (child.info.start_position <= chunk_it.current().pos1 &&
child.info.end_position >= chunk_it.current().pos1 +
chunk_it.current().len1) {
ProcessInternals(child);
source_changed = source_changed ||
( child.status != FunctionStatus.UNCHANGED );
code_changed = code_changed ||
( child.status == FunctionStatus.DAMAGED );
child_index++;
continue;
} else {
code_changed = true;
child.status = FunctionStatus.DAMAGED;
child.status_explanation =
"Text diff overlaps with function boundary";
child_index++;
continue;
}
} else {
if (chunk_it.current().pos1 + chunk_it.current().len1 <=
info_node.info.end_position) {
info_node.status = FunctionStatus.CHANGED;
chunk_it.next();
continue;
} else {
info_node.status = FunctionStatus.DAMAGED;
info_node.status_explanation =
"Text diff overlaps with function boundary";
return;
}
}
Assert("Unreachable", false);
}
while (child_index < info_node.children.length) {
var child = info_node.children[child_index];
ProcessUnchangedChild(child);
child_index++;
}
if (code_changed) {
info_node.status = FunctionStatus.CHANGED;
} else if (source_changed) {
info_node.status = FunctionStatus.SOURCE_CHANGED;
}
info_node.new_end_pos =
chunk_it.TranslatePos(info_node.info.end_position);
}
function ProcessUnchangedChild(node) {
node.new_start_pos = chunk_it.TranslatePos(node.info.start_position);
node.new_end_pos = chunk_it.TranslatePos(node.info.end_position);
}
ProcessInternals(code_info_tree);
}
// For each old function (if it is not damaged) tries to find a corresponding
// function in the new script. Typically it should succeed (non-damaged
// functions by definition may only have changes inside their bodies). However
// there are reasons for a correspondence not to be found: a function with
// unmodified text may become enclosed in another function in the new script,
// and a seemingly innocent change inside a function body may in fact be
// something like "} function B() {" that splits a function in two.
function FindCorrespondingFunctions(old_code_tree, new_code_tree) {
// A recursive function that tries to find a correspondence for all
// child functions and for their inner functions.
function ProcessChildren(old_node, new_node) {
var old_children = old_node.children;
var new_children = new_node.children;
var old_index = 0;
var new_index = 0;
while (old_index < old_children.length) {
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
old_index++;
} else if (new_index < new_children.length) {
if (new_children[new_index].info.start_position <
old_children[old_index].new_start_pos) {
new_index++;
} else if (new_children[new_index].info.start_position ==
old_children[old_index].new_start_pos) {
if (new_children[new_index].info.end_position ==
old_children[old_index].new_end_pos) {
old_children[old_index].corresponding_node =
new_children[new_index];
if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
ProcessChildren(old_children[old_index],
new_children[new_index]);
if (old_children[old_index].status == FunctionStatus.DAMAGED) {
old_node.status = FunctionStatus.CHANGED;
}
}
} else {
old_children[old_index].status = FunctionStatus.DAMAGED;
old_children[old_index].status_explanation =
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
}
new_index++;
old_index++;
} else {
old_children[old_index].status = FunctionStatus.DAMAGED;
old_children[old_index].status_explanation =
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
old_index++;
}
} else {
old_children[old_index].status = FunctionStatus.DAMAGED;
old_children[old_index].status_explanation =
"No corresponding function in new script found";
old_node.status = FunctionStatus.CHANGED;
old_index++;
}
}
if (old_node.status == FunctionStatus.CHANGED) {
if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
old_node.status = FunctionStatus.DAMAGED;
old_node.status_explanation = "Changed code expectations";
}
}
}
ProcessChildren(old_code_tree, new_code_tree);
old_code_tree.corresponding_node = new_code_tree;
Assert(old_code_tree.status != FunctionStatus.DAMAGED,
"Script became damaged");
}
// An object describing function compilation details. Its index fields
// apply to indexes inside array that stores these objects.
@ -469,13 +763,12 @@ Debug.LiveEdit = new function() {
// LiveEdit main entry point: changes a script text to a new string.
function SetScriptSource(script, new_source, change_log) {
var old_source = script.source;
var diff = FindSimpleDiff(old_source, new_source);
if (!diff) {
var diff = CompareStringsLinewise(old_source, new_source);
if (diff.length == 0) {
change_log.push( { empty_diff: true } );
return;
}
ApplyPatch(script, diff.change_pos, diff.old_len,
new_source.substring(diff.change_pos, diff.change_pos + diff.new_len),
change_log);
ApplyPatchMultiChunk(script, diff, new_source, change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;

10
deps/v8/src/liveedit.cc

@ -346,7 +346,7 @@ class LineArrayCompareInput : public Compare::Input {
// Stores the compare result in a JSArray. Each chunk is stored as 3 array elements:
// (pos1, len1, len2).
// (pos1_begin, pos1_end, pos2_end).
class LineArrayCompareOutput : public Compare::Output {
public:
LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
@ -362,9 +362,9 @@ class LineArrayCompareOutput : public Compare::Output {
SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
SetElement(array_, current_size_ + 1,
Handle<Object>(Smi::FromInt(char_len1)));
Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
SetElement(array_, current_size_ + 2,
Handle<Object>(Smi::FromInt(char_len2)));
Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
current_size_ += 3;
}
@ -717,8 +717,8 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
}
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
if (Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
reloc_infos_.Add(*rinfo);
}
}

2
deps/v8/src/liveedit.h

@ -111,7 +111,7 @@ class LiveEdit : AllStatic {
};
// Compares 2 strings line-by-line and returns the diff in the form of an array of
// triplets (pos1, len1, len2) describing list of diff chunks.
// triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
static Handle<JSArray> CompareStringsLinewise(Handle<String> s1,
Handle<String> s2);
};
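The comment above describes the triplet encoding; to make it concrete, each chunk (pos1_begin, pos1_end, pos2_end) replaces the half-open range [pos1_begin, pos1_end) of the old text with a range ending at pos2_end in the new text, where the new range's start follows from the unchanged run before the chunk. A standalone decoding sketch (illustrative only; plain ints stand in for the Smi array elements):

#include <cstdio>
#include <vector>

int main() {
  // Two chunks: old [5,9) -> new [5,12), then a pure insertion of 7
  // characters at old position 20.
  std::vector<int> diff = {5, 9, 12, 20, 20, 30};
  int pos1 = 0, pos2 = 0;  // positions reached after the previous chunk
  for (size_t i = 0; i + 2 < diff.size(); i += 3) {
    pos2 += diff[i] - pos1;         // skip the unchanged run before the chunk
    pos1 = diff[i];
    int len1 = diff[i + 1] - pos1;  // length removed from the old text
    int len2 = diff[i + 2] - pos2;  // length inserted into the new text
    printf("old [%d,%d) -> new [%d,%d)\n",
           pos1, pos1 + len1, pos2, pos2 + len2);
    pos1 = diff[i + 1];
    pos2 = diff[i + 2];
  }
  return 0;
}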

134
deps/v8/src/mark-compact.cc

@ -1055,6 +1055,7 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
// The offset of each live object in the page from the first live object
// in the page.
int offset = 0;
@ -1277,6 +1278,29 @@ static void SweepNewSpace(NewSpace* space) {
static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of a paged space we try to find the longest sequences
// of pages without live objects and free them (instead of putting them on
// the free list).
// Page preceding current.
Page* prev = Page::FromAddress(NULL);
// First empty page in a sequence.
Page* first_empty_page = Page::FromAddress(NULL);
// Page preceding first empty page.
Page* prec_first_empty_page = Page::FromAddress(NULL);
// If the last used page of the space ends with a sequence of dead objects
// we can adjust the allocation top instead of putting this free area into
// the free list. Thus during sweeping we keep track of such areas
// and defer their deallocation until the sweeping of the next page
// is done: if one of the next pages contains live objects we have
// to put such an area into the free list.
Address last_free_start = NULL;
int last_free_size = 0;
while (it.has_next()) {
Page* p = it.next();
@ -1291,8 +1315,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start, static_cast<int>(current - free_start));
dealloc(free_start, static_cast<int>(current - free_start), true);
is_previous_alive = true;
}
} else {
@ -1306,39 +1331,113 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// loop.
}
// If the last region was not live we need to deallocate from
// free_start to the allocation top in the page.
if (!is_previous_alive) {
int free_size = static_cast<int>(p->AllocationTop() - free_start);
if (free_size > 0) {
dealloc(free_start, free_size);
bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
|| (!is_previous_alive && free_start == p->ObjectAreaStart());
if (page_is_empty) {
// This page is empty. Check whether we are in the middle of
// sequence of empty pages and start one if not.
if (!first_empty_page->is_valid()) {
first_empty_page = p;
prec_first_empty_page = prev;
}
if (!is_previous_alive) {
// There are dead objects on this page. Update space accounting stats
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
dealloc(free_start, size_in_bytes, false);
}
}
} else {
// This page is not empty. Sequence of empty pages ended on the previous
// one.
if (first_empty_page->is_valid()) {
space->FreePages(prec_first_empty_page, prev);
prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
}
// If there is a free ending area on one of the previous pages we have to
// deallocate that area and put it on the free list.
if (last_free_size > 0) {
dealloc(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
// If the last region of this page was not live we remember it.
if (!is_previous_alive) {
ASSERT(last_free_size == 0);
last_free_size = static_cast<int>(p->AllocationTop() - free_start);
last_free_start = free_start;
}
}
prev = p;
}
// We reached end of space. See if we need to adjust allocation top.
Address new_allocation_top = NULL;
if (first_empty_page->is_valid()) {
// Last used pages in space are empty. We can move allocation top backwards
// to the beginning of first empty page.
ASSERT(prev == space->AllocationTopPage());
new_allocation_top = first_empty_page->ObjectAreaStart();
}
if (last_free_size > 0) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
dealloc(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}
if (new_allocation_top != NULL) {
#ifdef DEBUG
Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
if (!first_empty_page->is_valid()) {
ASSERT(new_allocation_top_page == space->AllocationTopPage());
} else if (last_free_size > 0) {
ASSERT(new_allocation_top_page == prec_first_empty_page);
} else {
ASSERT(new_allocation_top_page == first_empty_page);
}
#endif
space->SetTop(new_allocation_top);
}
}
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes) {
int size_in_bytes,
bool add_to_freelist) {
Heap::ClearRSetRange(start, size_in_bytes);
Heap::old_pointer_space()->Free(start, size_in_bytes);
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes) {
Heap::old_data_space()->Free(start, size_in_bytes);
int size_in_bytes,
bool add_to_freelist) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes) {
Heap::code_space()->Free(start, size_in_bytes);
int size_in_bytes,
bool add_to_freelist) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes) {
int size_in_bytes,
bool add_to_freelist) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
@ -1346,13 +1445,14 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a);
Heap::map_space()->Free(a, add_to_freelist);
}
}
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes) {
int size_in_bytes,
bool add_to_freelist) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
@ -1361,7 +1461,7 @@ void MarkCompactCollector::DeallocateCellBlock(Address start,
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a);
Heap::cell_space()->Free(a, add_to_freelist);
}
}

30
deps/v8/src/mark-compact.h

@ -37,7 +37,11 @@ namespace internal {
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Callback function for non-live blocks in the old generation.
typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
// If add_to_freelist is false then only accounting stats are updated;
// no attempt is made to add the area to the free list.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
bool add_to_freelist);
// Forward declarations.
@ -313,11 +317,25 @@ class MarkCompactCollector: public AllStatic {
// Callback functions for deallocating non-live blocks in the old
// generation.
static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
static void DeallocateOldDataBlock(Address start, int size_in_bytes);
static void DeallocateCodeBlock(Address start, int size_in_bytes);
static void DeallocateMapBlock(Address start, int size_in_bytes);
static void DeallocateCellBlock(Address start, int size_in_bytes);
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
static void DeallocateCodeBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
static void DeallocateMapBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
static void DeallocateCellBlock(Address start,
int size_in_bytes,
bool add_to_freelist);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked

14
deps/v8/src/regexp.js

@ -115,7 +115,9 @@ function CompileRegExp(pattern, flags) {
function DoRegExpExec(regexp, string, index) {
return %_RegExpExec(regexp, string, index, lastMatchInfo);
var result = %_RegExpExec(regexp, string, index, lastMatchInfo);
if (result !== null) lastMatchInfoOverride = null;
return result;
}
@ -136,7 +138,7 @@ var regExpCache = new RegExpCache();
function CloneRegExpResult(array) {
if (array == null) return null;
if (array == null) return null;
var length = array.length;
var answer = %_RegExpConstructResult(length, array.index, array.input);
for (var i = 0; i < length; i++) {
@ -237,7 +239,7 @@ function RegExpExec(string) {
cache.type = 'exec';
return matchIndices; // No match.
}
lastMatchInfoOverride = null;
var result = BuildResultFromMatchInfo(matchIndices, s);
if (this.global) {
@ -312,7 +314,7 @@ function RegExpTest(string) {
cache.answer = false;
return false;
}
lastMatchInfoOverride = null;
if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
cache.answer = true;
return true;
@ -340,7 +342,9 @@ function RegExpToString() {
// on the captures array of the last successful match and the subject string
// of the last successful match.
function RegExpGetLastMatch() {
if (lastMatchInfoOverride) { return lastMatchInfoOverride[0]; }
if (lastMatchInfoOverride !== null) {
return lastMatchInfoOverride[0];
}
var regExpSubject = LAST_SUBJECT(lastMatchInfo);
return SubString(regExpSubject,
lastMatchInfo[CAPTURE0],

2
deps/v8/src/runtime.cc

@ -9766,7 +9766,7 @@ static Object* Runtime_LiveEditCheckAndDropActivations(Arguments args) {
}
// Compares 2 strings line-by-line and returns the diff in the form of a JSArray of
// triplets (pos1, len1, len2) describing list of diff chunks.
// triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
static Object* Runtime_LiveEditCompareStringsLinewise(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;

3
deps/v8/src/serialize.cc

@ -836,6 +836,9 @@ void Deserializer::ReadChunk(Object** current,
case START_NEW_PAGE_SERIALIZATION: {
int space = source_->Get();
pages_[space].Add(last_object_address_);
if (space == CODE_SPACE) {
CPU::FlushICache(last_object_address_, Page::kPageSize);
}
break;
}
case NATIVES_STRING_RESOURCE: {

34
deps/v8/src/spaces-inl.h

@ -145,6 +145,40 @@ bool Page::IsRSetSet(Address address, int offset) {
}
bool Page::GetPageFlag(PageFlag flag) {
return (flags & flag) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
flags |= flag;
} else {
flags &= ~flag;
}
}
bool Page::WasInUseBeforeMC() {
return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}
void Page::SetWasInUseBeforeMC(bool was_in_use) {
SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}
bool Page::IsLargeObjectPage() {
return !GetPageFlag(IS_NORMAL_PAGE);
}
void Page::SetIsLargeObjectPage(bool is_large_object_page) {
SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}
// -----------------------------------------------------------------------------
// MemoryAllocator

212
deps/v8/src/spaces.cc

@ -524,7 +524,7 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
p->is_normal_page = 1;
p->SetIsLargeObjectPage(false);
page_addr += Page::kPageSize;
}
@ -568,6 +568,15 @@ Page* MemoryAllocator::FreePages(Page* p) {
}
void MemoryAllocator::FreeAllPages(PagedSpace* space) {
for (int i = 0, length = chunks_.length(); i < length; i++) {
if (chunks_[i].owner() == space) {
DeleteChunk(i);
}
}
}
void MemoryAllocator::DeleteChunk(int chunk_id) {
ASSERT(IsValidChunk(chunk_id));
@ -622,6 +631,74 @@ void MemoryAllocator::ReportStatistics() {
#endif
void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
Page** first_page,
Page** last_page,
Page** last_page_in_use) {
Page* first = NULL;
Page* last = NULL;
for (int i = 0, length = chunks_.length(); i < length; i++) {
ChunkInfo& chunk = chunks_[i];
if (chunk.owner() == space) {
if (first == NULL) {
Address low = RoundUp(chunk.address(), Page::kPageSize);
first = Page::FromAddress(low);
}
last = RelinkPagesInChunk(i,
chunk.address(),
chunk.size(),
last,
last_page_in_use);
}
}
if (first_page != NULL) {
*first_page = first;
}
if (last_page != NULL) {
*last_page = last;
}
}
Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
Address chunk_start,
size_t chunk_size,
Page* prev,
Page** last_page_in_use) {
Address page_addr = RoundUp(chunk_start, Page::kPageSize);
int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
if (prev->is_valid()) {
SetNextPage(prev, Page::FromAddress(page_addr));
}
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
page_addr += Page::kPageSize;
if (p->WasInUseBeforeMC()) {
*last_page_in_use = p;
}
}
// Set the next page of the last page to 0.
Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
last_page->opaque_header = OffsetFrom(0) | chunk_id;
if (last_page->WasInUseBeforeMC()) {
*last_page_in_use = last_page;
}
return last_page;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation
@ -677,6 +754,8 @@ bool PagedSpace::Setup(Address start, size_t size) {
// Use first_page_ for allocation.
SetAllocationInfo(&allocation_info_, first_page_);
page_list_is_chunk_ordered_ = true;
return true;
}
@ -687,9 +766,8 @@ bool PagedSpace::HasBeenSetup() {
void PagedSpace::TearDown() {
first_page_ = MemoryAllocator::FreePages(first_page_);
ASSERT(!first_page_->is_valid());
MemoryAllocator::FreeAllPages(this);
first_page_ = NULL;
accounting_stats_.Clear();
}
@ -874,6 +952,12 @@ int PagedSpace::CountTotalPages() {
void PagedSpace::Shrink() {
if (!page_list_is_chunk_ordered_) {
// We can't shrink the space if the page list is not chunk-ordered
// (see the comment for class MemoryAllocator for the definition).
return;
}
// Release half of free pages.
Page* top_page = AllocationTopPage();
ASSERT(top_page->is_valid());
@ -955,7 +1039,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
ASSERT(top == PageAllocationLimit(current_page));
}
// It should be packed with objects from the bottom to the top.
@ -1363,7 +1447,7 @@ static void ClearCodeKindStatistics() {
static void ReportCodeKindStatistics() {
const char* table[Code::NUMBER_OF_KINDS];
const char* table[Code::NUMBER_OF_KINDS] = { NULL };
#define CASE(name) \
case Code::name: table[Code::name] = #name; \
@ -1782,6 +1866,9 @@ Object* FixedSizeFreeList::Allocate() {
// OldSpace implementation
void OldSpace::PrepareForMarkCompact(bool will_compact) {
// Call prepare of the super class.
PagedSpace::PrepareForMarkCompact(will_compact);
if (will_compact) {
// Reset relocation info. During a compacting collection, everything in
// the space is considered 'available' and we will rediscover live data
@ -1852,6 +1939,112 @@ bool NewSpace::ReserveSpace(int bytes) {
}
void PagedSpace::FreePages(Page* prev, Page* last) {
if (last == AllocationTopPage()) {
// Pages are already at the end of used pages.
return;
}
Page* first = NULL;
// Remove pages from the list.
if (prev == NULL) {
first = first_page_;
first_page_ = last->next_page();
} else {
first = prev->next_page();
MemoryAllocator::SetNextPage(prev, last->next_page());
}
// Attach it after the last page.
MemoryAllocator::SetNextPage(last_page_, first);
last_page_ = last;
MemoryAllocator::SetNextPage(last, NULL);
// Clean them up.
do {
first->ClearRSet();
first = first->next_page();
} while (first != NULL);
// Order of pages in this space might no longer be consistent with
// order of pages in chunks.
page_list_is_chunk_ordered_ = false;
}
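Since FreePages is what invalidates the chunk-order invariant described in spaces.h, a small standalone check of what chunk-ordered means may help. This is a hypothetical helper, not a V8 function; pages are represented here only by their chunk ids, so intra-chunk page order is deliberately ignored:

#include <set>
#include <vector>

// A page list is chunk-ordered only if the pages of each chunk form a single
// contiguous run in the list.
static bool IsChunkOrdered(const std::vector<int>& chunk_ids) {
  std::set<int> finished;  // chunks whose run has already ended
  for (size_t i = 0; i < chunk_ids.size(); ++i) {
    if (finished.count(chunk_ids[i]) > 0) return false;  // chunk re-appears
    if (i + 1 < chunk_ids.size() && chunk_ids[i + 1] != chunk_ids[i]) {
      finished.insert(chunk_ids[i]);  // the run for this chunk just ended
    }
  }
  return true;
}

int main() {
  std::vector<int> ordered = {1, 1, 2, 2};  // A1 A2 B1 B2: chunk-ordered
  std::vector<int> broken = {1, 2, 1, 2};   // A1 B1 A2 B2: chunk 1 is split
  return (IsChunkOrdered(ordered) && !IsChunkOrdered(broken)) ? 0 : 1;
}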
void PagedSpace::PrepareForMarkCompact(bool will_compact) {
if (will_compact) {
// MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
// to skip unused pages. Update flag value for all pages in space.
PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
Page* last_in_use = AllocationTopPage();
bool in_use = true;
while (all_pages_iterator.has_next()) {
Page* p = all_pages_iterator.next();
p->SetWasInUseBeforeMC(in_use);
if (p == last_in_use) {
// We passed the page containing the allocation top. All subsequent
// pages are unused.
in_use = false;
}
}
if (!page_list_is_chunk_ordered_) {
Page* new_last_in_use = Page::FromAddress(NULL);
MemoryAllocator::RelinkPageListInChunkOrder(this,
&first_page_,
&last_page_,
&new_last_in_use);
ASSERT(new_last_in_use->is_valid());
if (new_last_in_use != last_in_use) {
// Current allocation top points to a page which is now in the middle
// of page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
if (size_in_bytes > 0) {
// There is still some space left on this page. Create a fake
// object which will occupy all free space on this page.
// Otherwise iterators would not be able to scan this page
// correctly.
Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
size_in_bytes);
}
// The new last-in-use page was in the middle of the list before
// sorting, so it is full.
SetTop(new_last_in_use->AllocationTop());
ASSERT(AllocationTopPage() == new_last_in_use);
ASSERT(AllocationTopPage()->WasInUseBeforeMC());
}
PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
while (pages_in_use_iterator.has_next()) {
Page* p = pages_in_use_iterator.next();
if (!p->WasInUseBeforeMC()) {
// Empty page is in the middle of a sequence of used pages.
// Create a fake object which will occupy all free space on this page.
// Otherwise iterators would not be able to scan this page correctly.
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
}
}
page_list_is_chunk_ordered_ = true;
}
}
}
bool PagedSpace::ReserveSpace(int bytes) {
Address limit = allocation_info_.limit;
Address top = allocation_info_.top;
@ -2263,6 +2456,9 @@ void OldSpace::PrintRSet() { DoPrintRSet("old"); }
// FixedSpace implementation
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
// Call prepare of the super class.
PagedSpace::PrepareForMarkCompact(will_compact);
if (will_compact) {
// Reset relocation info.
MCResetRelocationInfo();
@ -2360,7 +2556,7 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
@ -2605,7 +2801,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// large object page. If the chunk_size happened to be written there, its
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->is_normal_page &= ~0x1;
page->SetIsLargeObjectPage(true);
page->ClearRSet();
int extra_bytes = requested_size - object_size;
if (extra_bytes > 0) {

121
deps/v8/src/spaces.h

@ -167,8 +167,17 @@ class Page {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
}
// True if this page was in use before current compaction started.
// Result is valid only for pages owned by paged spaces and
// only after PagedSpace::PrepareForMarkCompact was called.
inline bool WasInUseBeforeMC();
inline void SetWasInUseBeforeMC(bool was_in_use);
// True if this page is a large object page.
bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
inline bool IsLargeObjectPage();
inline void SetIsLargeObjectPage(bool is_large_object_page);
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
@ -244,6 +253,14 @@ class Page {
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
enum PageFlag {
IS_NORMAL_PAGE = 1 << 0,
WAS_IN_USE_BEFORE_MC = 1 << 1
};
inline bool GetPageFlag(PageFlag flag);
inline void SetPageFlag(PageFlag flag, bool value);
//---------------------------------------------------------------------------
// Page header description.
//
@ -262,7 +279,8 @@ class Page {
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
int is_normal_page;
// For normal pages this word is used to store various page flags.
int flags;
// The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
@ -407,6 +425,13 @@ class CodeRange : public AllStatic {
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//
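To make the definition concrete, here is a self-contained toy check of the chunk-order invariant (ToyPage and IsChunkOrdered are illustrative only, not V8 types):

#include <cassert>
#include <set>

struct ToyPage {
  int chunk_id;
  ToyPage* next;
};

// Chunk-ordered iff each chunk's pages form one contiguous run in the list,
// i.e. no chunk id reappears after a run of a different chunk has started.
bool IsChunkOrdered(ToyPage* first) {
  std::set<int> finished;  // chunks whose run has already ended
  int current = -1;        // chunk id of the run we are currently inside
  for (ToyPage* p = first; p != NULL; p = p->next) {
    if (p->chunk_id != current) {
      if (finished.count(p->chunk_id) > 0) return false;  // run resumed later
      if (current != -1) finished.insert(current);
      current = p->chunk_id;
    }
  }
  return true;
}

int main() {
  ToyPage c = {1, NULL}, b = {0, &c}, a = {0, &b};
  assert(IsChunkOrdered(&a));   // [0, 0, 1]: each chunk is one run
  ToyPage f = {0, NULL}, e = {1, &f}, d = {0, &e};
  assert(!IsChunkOrdered(&d));  // [0, 1, 0]: chunk 0 resumes after chunk 1
  return 0;
}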
class MemoryAllocator : public AllStatic {
public:
@ -466,13 +491,18 @@ class MemoryAllocator : public AllStatic {
static Page* AllocatePages(int requested_pages, int* allocated_pages,
PagedSpace* owner);
// Frees pages from a given page and after. If 'p' is the first page
// of a chunk, pages from 'p' are freed and this function returns an
// invalid page pointer. Otherwise, the function searches a page
// after 'p' that is the first page of a chunk. Pages after the
// found page are freed and the function returns 'p'.
// Frees pages from a given page and after. Requires pages to be
// linked in chunk-order (see comment for class).
// If 'p' is the first page of a chunk, pages from 'p' are freed
// and this function returns an invalid page pointer.
// Otherwise, the function searches a page after 'p' that is
// the first page of a chunk. Pages after the found page
// are freed and the function returns 'p'.
static Page* FreePages(Page* p);
// Frees all pages owned by given space.
static void FreeAllPages(PagedSpace* space);
// Allocates and frees raw memory of a certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of the heap.
@ -511,6 +541,15 @@ class MemoryAllocator : public AllStatic {
static Page* FindFirstPageInSameChunk(Page* p);
static Page* FindLastPageInSameChunk(Page* p);
// Relinks the list of pages owned by the space to make it chunk-ordered.
// Returns the new first and last pages of the space.
// Also returns the last page in the relinked list that has the
// WasInUseBeforeMC flag set.
static void RelinkPageListInChunkOrder(PagedSpace* space,
Page** first_page,
Page** last_page,
Page** last_page_in_use);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static inline void Protect(Address start, size_t size);
@ -599,6 +638,12 @@ class MemoryAllocator : public AllStatic {
// used as a marking stack and its page headers are destroyed.
static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
static Page* RelinkPagesInChunk(int chunk_id,
Address chunk_start,
size_t chunk_size,
Page* prev,
Page** last_page_in_use);
};
@ -880,9 +925,16 @@ class PagedSpace : public Space {
void ClearRSet();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact) = 0;
virtual void PrepareForMarkCompact(bool will_compact);
// The top of allocation in a page in this space. Undefined if page is unused.
Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: PageAllocationLimit(page);
}
virtual Address PageAllocationTop(Page* page) = 0;
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) = 0;
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
@ -920,6 +972,16 @@ class PagedSpace : public Space {
// Used by ReserveSpace.
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
// Free all pages in the range from prev (exclusive) to last (inclusive).
// Freed pages are moved to the end of the page list.
void FreePages(Page* prev, Page* last);
// Set space allocation info.
void SetTop(Address top) {
allocation_info_.top = top;
allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
}
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
@ -968,6 +1030,9 @@ class PagedSpace : public Space {
static void ResetCodeStatistics();
#endif
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
protected:
// Maximum capacity of this space.
int max_capacity_;
@ -982,6 +1047,10 @@ class PagedSpace : public Space {
// Expand and Shrink.
Page* last_page_;
// True if pages owned by this space are linked in chunk-order.
// See comment for class MemoryAllocator for definition of chunk-order.
bool page_list_is_chunk_ordered_;
// Normal allocation information.
AllocationInfo allocation_info_;
@ -1043,8 +1112,6 @@ class PagedSpace : public Space {
void DoPrintRSet(const char* space_name);
#endif
private:
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
// Returns a pointer to the page of the relocation pointer.
Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
@ -1664,17 +1731,22 @@ class OldSpace : public PagedSpace {
// pointer).
int AvailableFree() { return free_list_.available(); }
// The top of allocation in a page in this space. Undefined if page is unused.
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
}
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
void Free(Address start, int size_in_bytes) {
int wasted_bytes = free_list_.Free(start, size_in_bytes);
// If add_to_freelist is false then only the accounting stats are updated;
// no attempt to add the area to the free list is made.
void Free(Address start, int size_in_bytes, bool add_to_freelist) {
accounting_stats_.DeallocateBytes(size_in_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
if (add_to_freelist) {
int wasted_bytes = free_list_.Free(start, size_in_bytes);
accounting_stats_.WasteBytes(wasted_bytes);
}
}
// Prepare for full garbage collection. Resets the relocation pointer and
@ -1727,17 +1799,20 @@ class FixedSpace : public PagedSpace {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}
// The top of allocation in a page in this space. Undefined if page is unused.
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - page_extra_;
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd() - page_extra_;
}
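For intuition, here is a worked example of the waste computation behind page_extra_, as standalone C++ with illustrative numbers only (the real Page::kObjectAreaSize depends on V8's page layout):

#include <cstdio>

int main() {
  const int kObjectAreaSize = 8 * 1024;  // assumed object-area size in bytes
  const int object_size_in_bytes = 24;   // hypothetical fixed cell size
  // page_extra_ is the tail of the object area too small for one more cell;
  // AllocateInNextPage() accounts exactly these bytes as waste per page.
  int page_extra = kObjectAreaSize % object_size_in_bytes;
  printf("waste per page: %d bytes\n", page_extra);  // 8192 % 24 == 8
  return 0;
}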
int object_size_in_bytes() { return object_size_in_bytes_; }
// Give a fixed-sized block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
// If add_to_freelist is false then only the accounting stats are updated;
// no attempt to add the area to the free list is made.
void Free(Address start, bool add_to_freelist) {
if (add_to_freelist) {
free_list_.Free(start);
}
accounting_stats_.DeallocateBytes(object_size_in_bytes_);
}

107
deps/v8/src/string.js

@ -175,9 +175,9 @@ function StringLocaleCompare(other) {
// ECMA-262 section 15.5.4.10
function StringMatch(regexp) {
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
if (!regexp.global) return regexp.exec(subject);
var cache = regExpCache;
var saveAnswer = false;
@ -435,63 +435,63 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) {
// array to use in the future, or until the original is written back.
resultArray = $Array(16);
}
try {
// Must handle exceptions thrown by the replace functions correctly,
// including unregistering global regexps.
var res = %RegExpExecMultiple(regexp,
subject,
lastMatchInfo,
resultArray);
regexp.lastIndex = 0;
if (IS_NULL(res)) {
// No matches at all.
return subject;
}
var len = res.length;
var i = 0;
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
var match_start = 0;
while (i < len) {
var elem = res[i];
if (%_IsSmi(elem)) {
if (elem > 0) {
match_start = (elem >> 11) + (elem & 0x7ff);
} else {
match_start = res[++i] - elem;
}
var res = %RegExpExecMultiple(regexp,
subject,
lastMatchInfo,
resultArray);
regexp.lastIndex = 0;
if (IS_NULL(res)) {
// No matches at all.
return subject;
}
var len = res.length;
var i = 0;
if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
var match_start = 0;
var override = [null, 0, subject];
while (i < len) {
var elem = res[i];
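      // A smi element encodes a slice of the subject between matches: a
      // positive smi packs the slice's start and length, so
      // (elem >> 11) + (elem & 0x7ff) is the slice end, while a longer slice
      // takes two elements whose decoding below likewise yields the slice
      // end. Either way match_start advances to where the next match begins.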
if (%_IsSmi(elem)) {
if (elem > 0) {
match_start = (elem >> 11) + (elem & 0x7ff);
} else {
var func_result = replace.call(null, elem, match_start, subject);
if (!IS_STRING(func_result)) {
func_result = NonStringToString(func_result);
}
res[i] = func_result;
match_start += elem.length;
match_start = res[++i] - elem;
}
i++;
} else {
override[0] = elem;
override[1] = match_start;
lastMatchInfoOverride = override;
var func_result = replace.call(null, elem, match_start, subject);
if (!IS_STRING(func_result)) {
func_result = NonStringToString(func_result);
}
res[i] = func_result;
match_start += elem.length;
}
} else {
while (i < len) {
var elem = res[i];
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
lastMatchInfoOverride = elem;
var func_result = replace.apply(null, elem);
if (!IS_STRING(func_result)) {
func_result = NonStringToString(func_result);
}
res[i] = func_result;
i++;
}
} else {
while (i < len) {
var elem = res[i];
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
lastMatchInfoOverride = elem;
var func_result = replace.apply(null, elem);
if (!IS_STRING(func_result)) {
func_result = NonStringToString(func_result);
}
i++;
res[i] = func_result;
}
i++;
}
var result = new ReplaceResultBuilder(subject, res);
return result.generate();
} finally {
lastMatchInfoOverride = null;
resultArray.length = 0;
reusableReplaceArray = resultArray;
}
var resultBuilder = new ReplaceResultBuilder(subject, res);
var result = resultBuilder.generate();
resultArray.length = 0;
reusableReplaceArray = resultArray;
return result;
} else { // Not a global regexp, no need to loop.
var matchInfo = DoRegExpExec(regexp, subject, 0);
if (IS_NULL(matchInfo)) return subject;
@ -542,7 +542,6 @@ function StringSearch(re) {
var s = TO_STRING_INLINE(this);
var match = DoRegExpExec(regexp, s, 0);
if (match) {
lastMatchInfo = match;
return match[CAPTURE0];
}
return -1;

4
deps/v8/src/version.cc

@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
#define BUILD_NUMBER 4
#define PATCH_LEVEL 2
#define BUILD_NUMBER 5
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
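// Together these macros describe release 2.2.5 (patch level 0, not a
// release candidate).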
// Define SONAME to have the SCons build put a specific SONAME into the

163
deps/v8/src/x64/codegen-x64.cc

@ -304,8 +304,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
#endif
// New scope to get automatic timing calculation.
{ // NOLINT
HistogramTimerScope codegen_timer(&Counters::code_generation);
{ HistogramTimerScope codegen_timer(&Counters::code_generation);
CodeGenState state(this);
// Entry:
@ -3118,6 +3117,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
GenericUnaryOpStub stub(Token::SUB, overwrite);
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
answer.set_type_info(TypeInfo::Number());
frame_->Push(&answer);
break;
}
@ -3141,6 +3141,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->Spill(answer.reg());
__ SmiNot(answer.reg(), answer.reg());
continue_label.Bind(&answer);
answer.set_type_info(TypeInfo::Smi());
frame_->Push(&answer);
break;
}
@ -3149,6 +3150,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
// Smi check.
JumpTarget continue_label;
Result operand = frame_->Pop();
TypeInfo operand_info = operand.type_info();
operand.ToRegister();
Condition is_smi = masm_->CheckSmi(operand.reg());
continue_label.Branch(is_smi, &operand);
@ -3157,10 +3159,16 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
CALL_FUNCTION, 1);
continue_label.Bind(&answer);
if (operand_info.IsSmi()) {
answer.set_type_info(TypeInfo::Smi());
} else if (operand_info.IsInteger32()) {
answer.set_type_info(TypeInfo::Integer32());
} else {
answer.set_type_info(TypeInfo::Number());
}
frame_->Push(&answer);
break;
}
default:
UNREACHABLE();
}
@ -4297,17 +4305,23 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
frame_->Push(Factory::undefined_value());
return;
}
Handle<FixedArray> cache_obj(
FixedArray::cast(jsfunction_result_caches->get(cache_id)));
Load(args->at(1));
Result key = frame_->Pop();
key.ToRegister();
Result cache = allocator()->Allocate();
__ movq(cache.reg(), cache_obj, RelocInfo::EMBEDDED_OBJECT);
ASSERT(cache.is_valid());
__ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
__ movq(cache.reg(),
FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
__ movq(cache.reg(),
ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
__ movq(cache.reg(),
FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
Result tmp = allocator()->Allocate();
ASSERT(tmp.is_valid());
DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
cache.reg(),
@ -5297,6 +5311,15 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
static bool CouldBeNaN(const Result& result) {
if (result.type_info().IsSmi()) return false;
if (result.type_info().IsInteger32()) return false;
if (!result.is_constant()) return true;
if (!result.handle()->IsHeapNumber()) return false;
return isnan(HeapNumber::cast(*result.handle())->value());
}
void CodeGenerator::Comparison(AstNode* node,
Condition cc,
bool strict,
@ -5614,15 +5637,29 @@ void CodeGenerator::Comparison(AstNode* node,
right_side.Unuse();
dest->Split(cc);
}
} else { // Neither side is a constant Smi or null.
} else {
// Neither side is a constant Smi, constant 1-char string, or constant null.
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi());
NaNInformation nan_info =
(CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
kBothCouldBeNaN :
kCantBothBeNaN;
left_side.ToRegister();
right_side.ToRegister();
if (known_non_smi) {
// If at least one of the objects is not NaN, then if the objects
// are identical, they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
__ cmpq(left_side.reg(), right_side.reg());
dest->true_target()->Branch(equal);
}
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
@ -5642,7 +5679,14 @@ void CodeGenerator::Comparison(AstNode* node,
Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
is_smi.Branch(both_smi);
// When non-smi, call out to the compare stub.
// When non-smi, call out to the compare stub, after inlined checks.
// If at least one of the objects is not NaN, then if the objects
// are identical, they are equal.
if (nan_info == kCantBothBeNaN && cc == equal) {
__ cmpq(left_side.reg(), right_side.reg());
dest->true_target()->Branch(equal);
}
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
__ SmiTest(answer.reg()); // Sets both zero and sign flags.
@ -5691,6 +5735,57 @@ void DeferredInlineBinaryOperation::Generate() {
}
static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
Token::Value op,
const Result& right,
const Result& left) {
// Set TypeInfo of result according to the operation performed.
// We rely on the fact that smis have a 32 bit payload on x64.
STATIC_ASSERT(kSmiValueSize == 32);
switch (op) {
case Token::COMMA:
return right.type_info();
case Token::OR:
case Token::AND:
// Result type can be either of the two input types.
return operands_type;
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
// Result is always a smi.
return TypeInfo::Smi();
case Token::SAR:
case Token::SHL:
// Result is always a smi.
return TypeInfo::Smi();
case Token::SHR:
// Result of x >>> y is always a smi if the masked shift count (y & 0x1F)
// is at least 1, otherwise a number.
return (right.is_constant() && right.handle()->IsSmi()
&& (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
? TypeInfo::Smi()
: TypeInfo::Number();
case Token::ADD:
if (operands_type.IsNumber()) {
return TypeInfo::Number();
} else if (left.type_info().IsString() || right.type_info().IsString()) {
return TypeInfo::String();
} else {
return TypeInfo::Unknown();
}
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
// Result is always a number.
return TypeInfo::Number();
default:
UNREACHABLE();
}
UNREACHABLE();
return TypeInfo::Unknown();
}
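A standalone illustration of the Token::SHR rule above, in plain C++ rather than V8 code: ECMA-262 masks the shift count to five bits, so x >>> 32 shifts by zero and can yield values up to 2^32 - 1, which overflow a 32-bit smi payload, while any masked count of at least one keeps the result below 2^31:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -2000000000;
  // x >>> 32 in JS: the count 32 masks to 0, so the value is merely
  // reinterpreted as unsigned: 2294967296, too big for a 32-bit smi.
  uint32_t by_32 = static_cast<uint32_t>(x) >> (32 & 31);
  // x >>> 1: the masked count is 1, so the result is below 2^31 and fits.
  uint32_t by_1 = static_cast<uint32_t>(x) >> (1 & 31);
  printf("%u %u\n", by_32, by_1);  // 2294967296 1147483648
  return 0;
}

This is the case the LogicalShiftRightByMultipleOf32 test in smi-ops.js below exercises: 2294967296 + 2294967296 == 4589934592.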
void CodeGenerator::GenericBinaryOperation(Token::Value op,
StaticType* type,
OverwriteMode overwrite_mode) {
@ -5756,6 +5851,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
TypeInfo operands_type =
TypeInfo::Combine(left.type_info(), right.type_info());
TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
Result answer;
if (left_is_non_smi_constant || right_is_non_smi_constant) {
GenericBinaryOpStub stub(op,
@ -5786,56 +5883,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
}
}
// Set TypeInfo of result according to the operation performed.
// We rely on the fact that smis have a 32 bit payload on x64.
ASSERT(kSmiValueSize == 32);
TypeInfo result_type = TypeInfo::Unknown();
switch (op) {
case Token::COMMA:
result_type = right.type_info();
break;
case Token::OR:
case Token::AND:
// Result type can be either of the two input types.
result_type = operands_type;
break;
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
// Result is always a smi.
result_type = TypeInfo::Smi();
break;
case Token::SAR:
case Token::SHL:
// Result is always a smi.
result_type = TypeInfo::Smi();
break;
case Token::SHR:
// Result of x >>> y is always a smi if y >= 1, otherwise a number.
result_type = (right.is_constant() && right.handle()->IsSmi()
&& Smi::cast(*right.handle())->value() >= 1)
? TypeInfo::Smi()
: TypeInfo::Number();
break;
case Token::ADD:
if (operands_type.IsNumber()) {
result_type = TypeInfo::Number();
} else if (operands_type.IsString()) {
result_type = TypeInfo::String();
} else {
result_type = TypeInfo::Unknown();
}
break;
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
// Result is always a number.
result_type = TypeInfo::Number();
break;
default:
UNREACHABLE();
}
answer.set_type_info(result_type);
frame_->Push(&answer);
}

8
deps/v8/test/cctest/test-heap.cc

@ -830,11 +830,11 @@ TEST(LargeObjectSpaceContains) {
}
CHECK(bytes_to_page > FixedArray::kHeaderSize);
int* is_normal_page_ptr = &Page::FromAddress(next_page)->is_normal_page;
Address is_normal_page_addr = reinterpret_cast<Address>(is_normal_page_ptr);
int* flags_ptr = &Page::FromAddress(next_page)->flags;
Address flags_addr = reinterpret_cast<Address>(flags_ptr);
int bytes_to_allocate =
static_cast<int>(is_normal_page_addr - current_top) + kPointerSize;
static_cast<int>(flags_addr - current_top) + kPointerSize;
int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
kPointerSize;
@ -843,7 +843,7 @@ TEST(LargeObjectSpaceContains) {
Heap::AllocateFixedArray(n_elements));
int index = n_elements - 1;
CHECK_EQ(is_normal_page_ptr,
CHECK_EQ(flags_ptr,
HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
array->set(index, Smi::FromInt(0));
// This could have turned the next page into a LargeObjectPage:

2
deps/v8/test/cctest/test-spaces.cc

@ -77,7 +77,7 @@ TEST(Page) {
CHECK(p->is_valid());
p->opaque_header = 0;
p->is_normal_page = 0x1;
p->SetIsLargeObjectPage(false);
CHECK(!p->next_page()->is_valid());
CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);

45
deps/v8/test/mjsunit/binary-op-newspace.js

@ -0,0 +1,45 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/**
 * @fileoverview Check that a mod operation still works when the stub code
 * hits a failure in heap number allocation.
*/
// Flags: --max-new-space-size=131072
function f(x) {
return x % 3;
}
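// f(-3) evaluates -3 % 3, which is -0. Since -0 == 0, the test below checks
// 1 / f(-3) instead: division by -0 gives -Infinity, which distinguishes -0
// from +0.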
function test() {
for (var i = 0; i < 20000; i++) {
assertEquals(-1 / 0, 1 / f(-3));
}
}
test();

27
deps/v8/test/mjsunit/debug-liveedit-newsource.js

@ -30,18 +30,39 @@
Debug = debug.Debug
eval("var something1 = 25; "
+ " function ChooseAnimal() { return 'Cat'; } "
+ " ChooseAnimal.Helper = function() { return 'Help!'; }");
eval("var something1 = 25; \n"
+ "var something2 = 2010; \n"
+ "function ChooseAnimal() {\n"
+ " return 'Cat';\n"
+ "} \n"
+ "function ChooseFurniture() {\n"
+ " return 'Table';\n"
+ "} \n"
+ "function ChooseNumber() { return 17; } \n"
+ "ChooseAnimal.Factory = function Factory() {\n"
+ " return function FactoryImpl(name) {\n"
+ " return 'Help ' + name;\n"
+ " }\n"
+ "}\n");
assertEquals("Cat", ChooseAnimal());
assertEquals(25, something1);
var script = Debug.findScript(ChooseAnimal);
var new_source = script.source.replace("Cat", "Cap' + 'yb' + 'ara");
var new_source = new_source.replace("25", "26");
var new_source = new_source.replace("Help", "Hello");
var new_source = new_source.replace("17", "18");
print("new source: " + new_source);
var change_log = new Array();
Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
print("Change log: " + JSON.stringify(change_log) + "\n");
assertEquals("Capybara", ChooseAnimal());
// Global variables do not get changed (without restarting the script).
assertEquals(25, something1);
// The function is a one-liner, so currently it is treated as damaged and not patched.
assertEquals(17, ChooseNumber());
assertEquals("Hello Peter", ChooseAnimal.Factory()("Peter"));

2
deps/v8/test/mjsunit/debug-stepin-accessor.js

@ -36,7 +36,7 @@ var expected_source_line_text = null;
var expected_function_name = null;
// Simple debug event handler which the first time will cause a 'step in' action
// to get into g.call and than check that execution is pauesed inside
// to get into g.call and then check that execution is stopped inside
// function 'g'.
function listener(event, exec_state, event_data, data) {
try {

43
deps/v8/test/mjsunit/regress/regress-685.js

@ -0,0 +1,43 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Regression test for http://code.google.com/p/v8/issues/detail?id=685.
//
// Test that the keyed load IC generic stub uses an unsigned comparison
// for the length field of arrays.
//
// The test passes if it does not crash.
function test() {
var N = 0xFFFFFFFF;
var a = [];
a[N - 1] = 0;
a[N - 2] = 1;
a.reverse();
}
test();

9
deps/v8/test/mjsunit/smi-ops.js

@ -669,3 +669,12 @@ intConversion();
function shiftByZero(n) { return n << 0; }
assertEquals(3, shiftByZero(3.1415));
// Verify that the static type information of x >>> 32 is computed correctly.
function LogicalShiftRightByMultipleOf32(x) {
x = x >>> 32;
return x + x;
}
assertEquals(4589934592, LogicalShiftRightByMultipleOf32(-2000000000));
assertEquals(4589934592, LogicalShiftRightByMultipleOf32(-2000000000));

24
deps/v8/test/mjsunit/string-replace.js

@ -30,7 +30,7 @@
*/
function replaceTest(result, subject, pattern, replacement) {
var name =
"\"" + subject + "\".replace(" + pattern + ", " + replacement + ")";
assertEquals(result, subject.replace(pattern, replacement), name);
}
@ -114,8 +114,8 @@ replaceTest("xaxe$xcx", short, /b/, "e$");
replaceTest("xaxe$xcx", short, /b/g, "e$");
replaceTest("[$$$1$$a1abb1bb0$002$3$03][$$$1$$b1bcc1cc0$002$3$03]c",
"abc", /(.)(?=(.))/g, "[$$$$$$1$$$$$11$01$2$21$02$020$002$3$03]");
replaceTest("[$$$1$$a1abb1bb0$002$3$03][$$$1$$b1bcc1cc0$002$3$03]c",
"abc", /(.)(?=(.))/g, "[$$$$$$1$$$$$11$01$2$21$02$020$002$3$03]");
// Replace with functions.
@ -189,5 +189,21 @@ replaceTest("string true", "string x", /x/g, function() { return true; });
replaceTest("string null", "string x", /x/g, function() { return null; });
replaceTest("string undefined", "string x", /x/g, function() { return undefined; });
replaceTest("aundefinedbundefinedcundefined",
replaceTest("aundefinedbundefinedcundefined",
"abc", /(.)|(.)/g, function(m, m1, m2, i, s) { return m1+m2; });
// Test nested calls to replace, including that it sets RegExp.$& correctly.
function replacer(m,i,s) {
assertEquals(m,RegExp['$&']);
return "[" + RegExp['$&'] + "-"
+ m.replace(/./g,"$&$&") + "-"
+ m.replace(/./g,function() { return RegExp['$&']; })
+ "-" + RegExp['$&'] + "]";
}
replaceTest("[ab-aabb-ab-b][az-aazz-az-z]",
"abaz", /a./g, replacer);
replaceTest("[ab-aabb-ab-b][az-aazz-az-z]",
"abaz", /a(.)/g, replacer);

26
deps/v8/tools/gyp/v8.gyp

@ -29,7 +29,7 @@
'variables': {
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
'target_arch%': 'ia32',
'v8_target_arch%': '<(target_arch)',
'v8_use_snapshot%': 'true',
},
'target_defaults': {
@ -39,17 +39,17 @@
'ENABLE_VMSTATE_TRACKING',
],
'conditions': [
['target_arch=="arm"', {
['v8_target_arch=="arm"', {
'defines': [
'V8_TARGET_ARCH_ARM',
],
}],
['target_arch=="ia32"', {
['v8_target_arch=="ia32"', {
'defines': [
'V8_TARGET_ARCH_IA32',
],
}],
['target_arch=="x64"', {
['v8_target_arch=="x64"', {
'defines': [
'V8_TARGET_ARCH_X64',
],
@ -204,7 +204,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
['target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
@ -264,8 +264,8 @@
'../../src/cpu-profiler-inl.h',
'../../src/cpu-profiler.cc',
'../../src/cpu-profiler.h',
'../../src/data-flow.cc',
'../../src/data-flow.h',
'../../src/dateparser.cc',
'../../src/dateparser.h',
'../../src/dateparser-inl.h',
@ -396,8 +396,8 @@
'../../src/token.h',
'../../src/top.cc',
'../../src/top.h',
'../../src/type-info.cc',
'../../src/type-info.h',
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
@ -424,7 +424,7 @@
'../../src/zone.h',
],
'conditions': [
['target_arch=="arm"', {
['v8_target_arch=="arm"', {
'include_dirs+': [
'../../src/arm',
],
@ -470,7 +470,7 @@
}]
]
}],
['target_arch=="ia32"', {
['v8_target_arch=="ia32"', {
'include_dirs+': [
'../../src/ia32',
],
@ -505,7 +505,7 @@
'../../src/ia32/virtual-frame-ia32.h',
],
}],
['target_arch=="x64"', {
['v8_target_arch=="x64"', {
'include_dirs+': [
'../../src/x64',
],
@ -656,7 +656,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
['target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]

2
deps/v8/tools/v8.xcodeproj/project.pbxproj

@ -1588,7 +1588,9 @@
GCC_PREPROCESSOR_DEFINITIONS = (
"$(GCC_PREPROCESSOR_DEFINITIONS)",
DEBUG,
ENABLE_LOGGING_AND_PROFILING,
V8_ENABLE_CHECKS,
ENABLE_VMSTATE_TRACKING,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = YES;
