diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 13061120d3..83ebc02530 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,53 @@
+2009-07-13: Version 1.2.14
+
+        Added separate paged heap space for global property cells and
+        avoided updating the write barrier when storing into them.
+
+ Improved peep-hole optimization on ARM platforms by not emitting
+ unnecessary debug information.
+
+ Re-enabled ICs for loads and calls that skip a global object
+ during lookup through the prototype chain.
+
+ Allowed access through global proxies to use ICs.
+
+ Fixed issue 401.
+
+
+2009-07-09: Version 1.2.13
+
+ Fixed issue 397, issue 398, and issue 399.
+
+ Added support for breakpoint groups.
+
+ Fixed bugs introduced with the new global object representation.
+
+ Fixed a few bugs in the ARM code generator.
+
+
+2009-07-06: Version 1.2.12
+
+        Added stack trace collection to Error objects, accessible
+        through the e.stack property.
+
+ Changed RegExp parser to use a recursive data structure instead of
+ stack-based recursion.
+
+ Optimized Date object construction and string concatenation.
+
+ Improved performance of div, mod, and mul on ARM platforms.
+
+
+2009-07-02: Version 1.2.11
+
+ Improved performance on IA-32 and ARM.
+
+ Fixed profiler sampler implementation on Mac OS X.
+
+ Changed the representation of global objects to improve
+ performance of adding a lot of new properties.
+
+
2009-06-29: Version 1.2.10
Improved debugger support.
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 0baf71b5d3..78b050d735 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -95,7 +95,12 @@ ANDROID_LINKFLAGS = ['-nostdlib',
LIBRARY_FLAGS = {
'all': {
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
- 'CPPPATH': [join(root_dir, 'src')]
+ 'CPPPATH': [join(root_dir, 'src')],
+ 'regexp:native': {
+ 'arch:ia32' : {
+ 'CPPDEFINES': ['V8_NATIVE_REGEXP']
+ }
+ }
},
'gcc': {
'all': {
@@ -167,6 +172,7 @@ LIBRARY_FLAGS = {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'arch:x64': {
+ 'CCFLAGS': ['-fno-strict-aliasing'],
'CPPDEFINES': ['V8_TARGET_ARCH_X64']
},
'prof:oprofile': {
@@ -546,6 +552,11 @@ SIMPLE_OPTIONS = {
'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
+ 'regexp': {
+ 'values': ['native', 'interpreted'],
+ 'default': 'native',
+ 'help': 'Whether to use native or interpreted regexp implementation'
+ },
'snapshot': {
'values': ['on', 'off', 'nobuild'],
'default': 'off',
@@ -677,6 +688,8 @@ def VerifyOptions(env):
return False
if not IsLegal(env, 'sample', ["shell", "process"]):
return False
+ if not IsLegal(env, 'regexp', ["native", "interpreted"]):
+ return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
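The new regexp option above is registered alongside the other SIMPLE_OPTIONS, so it should be selectable on the scons command line in the usual key=value form; a plausible invocation (not taken from this patch):

    scons arch=ia32 regexp=native
    scons regexp=interpreted

With regexp=native on ia32 the V8_NATIVE_REGEXP define above is set; any other combination falls back to the interpreted Irregexp implementation.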
diff --git a/deps/v8/benchmarks/README.txt b/deps/v8/benchmarks/README.txt
index 561e88b39f..eb759cc92c 100644
--- a/deps/v8/benchmarks/README.txt
+++ b/deps/v8/benchmarks/README.txt
@@ -57,4 +57,7 @@ of the benchmark.
Changes from Version 4 to Version 5
===================================
-Removed duplicate line in random seed code.
+Removed duplicate line in random seed code, and changed the name of
+the Object.prototype.inherits function in the DeltaBlue benchmark to
+inheritsFrom to avoid name clashes when running in Chromium with
+extensions enabled.
diff --git a/deps/v8/benchmarks/deltablue.js b/deps/v8/benchmarks/deltablue.js
index 253046f804..7e25d2e13f 100644
--- a/deps/v8/benchmarks/deltablue.js
+++ b/deps/v8/benchmarks/deltablue.js
@@ -46,7 +46,7 @@ var DeltaBlue = new BenchmarkSuite('DeltaBlue', 71104, [
/* --- O b j e c t M o d e l --- */
-Object.prototype.inherits = function (shuper) {
+Object.prototype.inheritsFrom = function (shuper) {
function Inheriter() { }
Inheriter.prototype = shuper.prototype;
this.prototype = new Inheriter();
@@ -216,7 +216,7 @@ function UnaryConstraint(v, strength) {
this.addConstraint();
}
-UnaryConstraint.inherits(Constraint);
+UnaryConstraint.inheritsFrom(Constraint);
/**
* Adds this constraint to the constraint graph
@@ -294,7 +294,7 @@ function StayConstraint(v, str) {
StayConstraint.superConstructor.call(this, v, str);
}
-StayConstraint.inherits(UnaryConstraint);
+StayConstraint.inheritsFrom(UnaryConstraint);
StayConstraint.prototype.execute = function () {
// Stay constraints do nothing
@@ -312,7 +312,7 @@ function EditConstraint(v, str) {
EditConstraint.superConstructor.call(this, v, str);
}
-EditConstraint.inherits(UnaryConstraint);
+EditConstraint.inheritsFrom(UnaryConstraint);
/**
* Edits indicate that a variable is to be changed by imperative code.
@@ -346,7 +346,7 @@ function BinaryConstraint(var1, var2, strength) {
this.addConstraint();
}
-BinaryConstraint.inherits(Constraint);
+BinaryConstraint.inheritsFrom(Constraint);
/**
 * Decides if this constraint can be satisfied and which way it
@@ -459,7 +459,7 @@ function ScaleConstraint(src, scale, offset, dest, strength) {
ScaleConstraint.superConstructor.call(this, src, dest, strength);
}
-ScaleConstraint.inherits(BinaryConstraint);
+ScaleConstraint.inheritsFrom(BinaryConstraint);
/**
* Adds this constraint to the constraint graph.
@@ -515,7 +515,7 @@ function EqualityConstraint(var1, var2, strength) {
EqualityConstraint.superConstructor.call(this, var1, var2, strength);
}
-EqualityConstraint.inherits(BinaryConstraint);
+EqualityConstraint.inheritsFrom(BinaryConstraint);
/**
* Enforce this constraint. Assume that it is satisfied.
diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html
index b86c876fe4..99d7be42b9 100644
--- a/deps/v8/benchmarks/revisions.html
+++ b/deps/v8/benchmarks/revisions.html
@@ -22,7 +22,10 @@ the benchmark suite.
-Removed a duplicate line in the base random seed code.
+
+Removed duplicate line in random seed code, and changed the name of
+the Object.prototype.inherits function in the DeltaBlue benchmark to
+inheritsFrom to avoid name clashes when running in Chromium with
+extensions enabled.
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 85b13ec9a6..f1057a812a 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -244,9 +244,10 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
// Implementations of ToLocal
-#define MAKE_TO_LOCAL(Name, From, To) \
+#define MAKE_TO_LOCAL(Name, From, To) \
   Local<To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
-    return Local<To>(reinterpret_cast<To*>(obj.location())); \
+    ASSERT(!obj->IsTheHole()); \
+    return Local<To>(reinterpret_cast<To*>(obj.location())); \
}
MAKE_TO_LOCAL(ToLocal, Context, Context)
diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js
index 2981eec598..6451e62603 100644
--- a/deps/v8/src/apinatives.js
+++ b/deps/v8/src/apinatives.js
@@ -51,6 +51,7 @@ function Instantiate(data, name) {
var Constructor = %GetTemplateField(data, kApiConstructorOffset);
var result = Constructor ? new (Instantiate(Constructor))() : {};
ConfigureTemplateInstance(result, data);
+ result = %ToFastProperties(result);
return result;
default:
throw 'Unknown API tag <' + tag + '>';
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 6ec8f460bc..3ed99f9e15 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -491,6 +491,20 @@ static bool fits_shifter(uint32_t imm32,
}
+// We have to use the temporary register for things that can be relocated even
+// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
+// space. There is no guarantee that the relocated location can be similarly
+// encoded.
+static bool MustUseIp(RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ return Serializer::enabled();
+ } else if (rmode == RelocInfo::NONE) {
+ return false;
+ }
+ return true;
+}
+
+
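For context on fits_shifter and the MustUseIp guard above: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, so only certain 32-bit constants are encodable, and a constant that is encodable before relocation may not be afterwards. A host-side sketch of the encodability test (an illustration only; the real fits_shifter can also rewrite the instruction, e.g. mov into mvn):

    #include <stdint.h>

    // Sketch: can imm32 be encoded as an ARM operand-2 immediate,
    // i.e. an 8-bit value rotated right by an even amount?
    static bool CanEncodeAsImmediate(uint32_t imm32) {
      for (int rot = 0; rot < 32; rot += 2) {
        // Rotating left by 'rot' undoes a rotate-right-by-'rot' encoding.
        uint32_t v = (rot == 0) ? imm32
                                : ((imm32 << rot) | (imm32 >> (32 - rot)));
        if (v <= 0xff) return true;  // fits in the 8-bit immediate field
      }
      return false;
    }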
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@@ -501,8 +515,7 @@ void Assembler::addrmod1(Instr instr,
// immediate
uint32_t rotate_imm;
uint32_t immed_8;
- if ((x.rmode_ != RelocInfo::NONE &&
- x.rmode_ != RelocInfo::EXTERNAL_REFERENCE) ||
+ if (MustUseIp(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@@ -684,6 +697,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
void Assembler::blx(int branch_offset) { // v5 and above
+ WriteRecordedPositions();
ASSERT((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
@@ -693,12 +707,14 @@ void Assembler::blx(int branch_offset) { // v5 and above
void Assembler::blx(Register target, Condition cond) { // v5 and above
+ WriteRecordedPositions();
ASSERT(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+ WriteRecordedPositions();
ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}
@@ -797,6 +813,9 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2,
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+ if (dst.is(pc)) {
+ WriteRecordedPositions();
+ }
addrmod1(cond | 13*B21 | s, r0, dst, src);
}
@@ -816,7 +835,6 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
- ASSERT(!dst.is(src1));
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -825,7 +843,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dst.is(src1));
+ // dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -837,7 +855,7 @@ void Assembler::smlal(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -850,7 +868,7 @@ void Assembler::smull(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -863,7 +881,7 @@ void Assembler::umlal(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -876,7 +894,7 @@ void Assembler::umull(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
- ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ ASSERT(!dstL.is(dstH));
emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@@ -906,8 +924,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// immediate
uint32_t rotate_imm;
uint32_t immed_8;
- if ((src.rmode_ != RelocInfo::NONE &&
- src.rmode_ != RelocInfo::EXTERNAL_REFERENCE)||
+ if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// immediate operand cannot be encoded, load it first to register ip
RecordRelocInfo(src.rmode_, src.imm32_);
@@ -926,6 +943,9 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Load/Store instructions
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+ if (dst.is(pc)) {
+ WriteRecordedPositions();
+ }
addrmod2(cond | B26 | L, dst, src);
// Eliminate pattern: push(r), pop(r)
@@ -1263,7 +1283,6 @@ void Assembler::RecordPosition(int pos) {
if (pos == RelocInfo::kNoPosition) return;
ASSERT(pos >= 0);
current_position_ = pos;
- WriteRecordedPositions();
}
@@ -1271,7 +1290,6 @@ void Assembler::RecordStatementPosition(int pos) {
if (pos == RelocInfo::kNoPosition) return;
ASSERT(pos >= 0);
current_statement_position_ = pos;
- WriteRecordedPositions();
}
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 6626619912..3f7ccf54ac 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -41,6 +41,23 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict);
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm);
+static void MultiplyByKnownInt(MacroAssembler* masm,
+ Register source,
+ Register destination,
+ int known_int);
+static bool IsEasyToMultiplyBy(int x);
+
+
+
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
@@ -683,33 +700,69 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode)
- : op_(op), mode_(mode) { }
+ OverwriteMode mode,
+ int constant_rhs = CodeGenerator::kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
private:
Token::Value op_;
OverwriteMode mode_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+
+ static const int kMaxKnownRhs = 0x40000000;
// Minor key encoding in 16 bits.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 14> {};
+  class OpBits: public BitField<Token::Value, 2, 6> {};
+  class KnownIntBits: public BitField<int, 8, 8> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
- | ModeBits::encode(mode_);
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt());
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ return key;
+ }
+
const char* GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
case Token::SUB: return "GenericBinaryOpStub_SUB";
case Token::MUL: return "GenericBinaryOpStub_MUL";
case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::MOD: return "GenericBinaryOpStub_MOD";
case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
@@ -721,13 +774,22 @@ class GenericBinaryOpStub : public CodeStub {
}
#ifdef DEBUG
- void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); }
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
#endif
};
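The 16-bit minor key above now folds the specialized right-hand side into the KnownIntBits field. A host-side restatement of MinorKeyForKnownInt's mapping (hypothetical helper name, for checking only): 0 when unspecialized, rhs + 1 for rhs in 2..10, and 12 plus the bit position for the larger powers of two.

    // Sketch: the constant-rhs component of the stub's minor key.
    static int KeyForKnownInt(int constant_rhs, bool specialized) {
      if (!specialized) return 0;                       // no constant encoded
      if (constant_rhs <= 10) return constant_rhs + 1;  // 2..10 -> 3..11
      int key = 12;                                     // powers of two from 12
      while ((constant_rhs & 1) == 0) {
        key++;
        constant_rhs >>= 1;
      }
      return key;                                       // 2^k -> 12 + k
    }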
void CodeGenerator::GenericBinaryOperation(Token::Value op,
- OverwriteMode overwrite_mode) {
+ OverwriteMode overwrite_mode,
+ int constant_rhs) {
VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
@@ -738,6 +800,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
case Token::ADD: // fall through.
case Token::SUB: // fall through.
case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
@@ -746,27 +810,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
case Token::SAR: {
frame_->EmitPop(r0); // r0 : y
frame_->EmitPop(r1); // r1 : x
- GenericBinaryOpStub stub(op, overwrite_mode);
+ GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs);
frame_->CallStub(&stub, 0);
break;
}
- case Token::DIV: {
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1));
- frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
- break;
- }
-
- case Token::MOD: {
- Result arg_count = allocator_->Allocate(r0);
- ASSERT(arg_count.is_valid());
- __ mov(arg_count.reg(), Operand(1));
- frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2);
- break;
- }
-
case Token::COMMA:
frame_->EmitPop(r0);
// simply discard left value
@@ -830,6 +878,10 @@ void DeferredInlineSmiOperation::Generate() {
break;
}
+ // For these operations there is no optimistic operation that needs to be
+ // reverted.
+ case Token::MUL:
+ case Token::MOD:
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
@@ -860,11 +912,32 @@ void DeferredInlineSmiOperation::Generate() {
break;
}
- GenericBinaryOpStub stub(op_, overwrite_mode_);
+ GenericBinaryOpStub stub(op_, overwrite_mode_, value_);
__ CallStub(&stub);
}
+static bool PopCountLessThanEqual2(unsigned int x) {
+ x &= x - 1;
+ return (x & (x - 1)) == 0;
+}
+
+
+// Returns the index of the lowest bit set.
+static int BitPosition(unsigned x) {
+ int bit_posn = 0;
+ while ((x & 0xf) == 0) {
+ bit_posn += 4;
+ x >>= 4;
+ }
+ while ((x & 1) == 0) {
+ bit_posn++;
+ x >>= 1;
+ }
+ return bit_posn;
+}
+
+
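Both helpers rely on a standard bit trick: x &= x - 1 clears the lowest set bit, so clearing one bit and then testing the power-of-two property accepts exactly the values with at most two bits set. A self-contained host-side check (the nibble-skipping fast path of BitPosition is omitted here for brevity):

    #include <cassert>

    static bool PopCountLessThanEqual2(unsigned int x) {
      x &= x - 1;                 // clear the lowest set bit
      return (x & (x - 1)) == 0;  // at most one bit left?
    }

    static int BitPosition(unsigned x) {  // index of lowest set bit, x != 0
      int bit_posn = 0;
      while ((x & 1) == 0) {
        bit_posn++;
        x >>= 1;
      }
      return bit_posn;
    }

    int main() {
      assert(PopCountLessThanEqual2(10));  // 0b1010: two bits set
      assert(!PopCountLessThanEqual2(7));  // 0b0111: three bits set
      assert(BitPosition(40) == 3);        // 40 == 0b101000
      return 0;
    }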
void CodeGenerator::SmiOperation(Token::Value op,
                                 Handle<Object> value,
bool reversed,
@@ -884,6 +957,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
JumpTarget exit;
frame_->EmitPop(r0);
+ bool something_to_inline = true;
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
@@ -913,6 +987,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
break;
}
+
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
@@ -934,75 +1009,125 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SHR:
case Token::SAR: {
if (reversed) {
- __ mov(ip, Operand(value));
- frame_->EmitPush(ip);
- frame_->EmitPush(r0);
- GenericBinaryOperation(op, mode);
-
- } else {
- int shift_value = int_value & 0x1f; // least significant 5 bits
- DeferredCode* deferred =
- new DeferredInlineSmiOperation(op, shift_value, false, mode);
- __ tst(r0, Operand(kSmiTagMask));
- deferred->Branch(ne);
- __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
- switch (op) {
- case Token::SHL: {
+ something_to_inline = false;
+ break;
+ }
+ int shift_value = int_value & 0x1f; // least significant 5 bits
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, shift_value, false, mode);
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
+ switch (op) {
+ case Token::SHL: {
+ if (shift_value != 0) {
__ mov(r2, Operand(r2, LSL, shift_value));
- // check that the *unsigned* result fits in a smi
- __ add(r3, r2, Operand(0x40000000), SetCC);
- deferred->Branch(mi);
- break;
}
- case Token::SHR: {
- // LSR by immediate 0 means shifting 32 bits.
- if (shift_value != 0) {
- __ mov(r2, Operand(r2, LSR, shift_value));
- }
- // check that the *unsigned* result fits in a smi
- // neither of the two high-order bits can be set:
- // - 0x80000000: high bit would be lost when smi tagging
- // - 0x40000000: this number would convert to negative when
- // smi tagging these two cases can only happen with shifts
- // by 0 or 1 when handed a valid smi
- __ and_(r3, r2, Operand(0xc0000000), SetCC);
- deferred->Branch(ne);
- break;
+ // check that the *unsigned* result fits in a smi
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ deferred->Branch(mi);
+ break;
+ }
+ case Token::SHR: {
+ // LSR by immediate 0 means shifting 32 bits.
+ if (shift_value != 0) {
+ __ mov(r2, Operand(r2, LSR, shift_value));
}
- case Token::SAR: {
- if (shift_value != 0) {
- // ASR by immediate 0 means shifting 32 bits.
- __ mov(r2, Operand(r2, ASR, shift_value));
- }
- break;
+ // check that the *unsigned* result fits in a smi
+ // neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging
+ // - 0x40000000: this number would convert to negative when
+          //   smi tagging. These two cases can only happen with shifts
+          //   by 0 or 1 when handed a valid smi.
+ __ and_(r3, r2, Operand(0xc0000000), SetCC);
+ deferred->Branch(ne);
+ break;
+ }
+ case Token::SAR: {
+ if (shift_value != 0) {
+ // ASR by immediate 0 means shifting 32 bits.
+ __ mov(r2, Operand(r2, ASR, shift_value));
}
- default: UNREACHABLE();
+ break;
}
- __ mov(r0, Operand(r2, LSL, kSmiTagSize));
- deferred->BindExit();
+ default: UNREACHABLE();
+ }
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ deferred->BindExit();
+ break;
+ }
+
+ case Token::MOD: {
+ if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
+ something_to_inline = false;
+ break;
}
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ unsigned mask = (0x80000000u | kSmiTagMask);
+ __ tst(r0, Operand(mask));
+ deferred->Branch(ne); // Go to deferred code on non-Smis and negative.
+ mask = (int_value << kSmiTagSize) - 1;
+ __ and_(r0, r0, Operand(mask));
+ deferred->BindExit();
break;
}
- default:
- if (!reversed) {
- frame_->EmitPush(r0);
- __ mov(r0, Operand(value));
- frame_->EmitPush(r0);
- } else {
- __ mov(ip, Operand(value));
- frame_->EmitPush(ip);
- frame_->EmitPush(r0);
+ case Token::MUL: {
+ if (!IsEasyToMultiplyBy(int_value)) {
+ something_to_inline = false;
+ break;
+ }
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
+ max_smi_that_wont_overflow <<= kSmiTagSize;
+ unsigned mask = 0x80000000u;
+ while ((mask & max_smi_that_wont_overflow) == 0) {
+ mask |= mask >> 1;
}
- GenericBinaryOperation(op, mode);
+ mask |= kSmiTagMask;
+ // This does a single mask that checks for a too high value in a
+ // conservative way and for a non-Smi. It also filters out negative
+ // numbers, unfortunately, but since this code is inline we prefer
+ // brevity to comprehensiveness.
+ __ tst(r0, Operand(mask));
+ deferred->Branch(ne);
+ MultiplyByKnownInt(masm_, r0, r0, int_value);
+ deferred->BindExit();
+ break;
+ }
+
+ default:
+ something_to_inline = false;
break;
}
+ if (!something_to_inline) {
+ if (!reversed) {
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(value));
+ frame_->EmitPush(r0);
+ GenericBinaryOperation(op, mode, int_value);
+ } else {
+ __ mov(ip, Operand(value));
+ frame_->EmitPush(ip);
+ frame_->EmitPush(r0);
+ GenericBinaryOperation(op, mode, kUnknownIntValue);
+ }
+ }
+
exit.Bind();
}
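The inlined MOD above works because, with kSmiTagSize == 1, a Smi n is stored as n << 1: masking the tagged word with (2^k << 1) - 1 yields the already-tagged value of n % 2^k for non-negative n, while the combined sign/tag mask sends negative numbers and non-Smis to the deferred code. A host-side check of that identity (illustrative values):

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;   // assumed: V8's 31-bit Smi encoding
      const int int_value = 8;     // example power-of-two divisor
      const unsigned mask = (int_value << kSmiTagSize) - 1;  // 0xF
      for (int n = 0; n < 1000; n++) {
        unsigned tagged = static_cast<unsigned>(n) << kSmiTagSize;
        // Masking the tagged word gives the correctly tagged remainder.
        assert((tagged & mask) ==
               (static_cast<unsigned>(n % int_value) << kSmiTagSize));
      }
      return 0;
    }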
-void CodeGenerator::Comparison(Condition cc, bool strict) {
+void CodeGenerator::Comparison(Condition cc,
+ Expression* left,
+ Expression* right,
+ bool strict) {
+ if (left != NULL) LoadAndSpill(left);
+ if (right != NULL) LoadAndSpill(right);
+
VirtualFrame::SpilledScope spilled_scope;
// sp[0] : y
// sp[1] : x
@@ -1026,43 +1151,19 @@ void CodeGenerator::Comparison(Condition cc, bool strict) {
__ tst(r2, Operand(kSmiTagMask));
smi.Branch(eq);
- // Perform non-smi comparison by runtime call.
- frame_->EmitPush(r1);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript native;
- int arg_count = 1;
- if (cc == eq) {
- native = strict ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- native = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- ASSERT(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- frame_->EmitPush(r0);
- arg_count++;
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- }
+ // Perform non-smi comparison by stub.
+ // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
+ // We call with 0 args because there are 0 on the stack.
+ CompareStub stub(cc, strict);
+ frame_->CallStub(&stub, 0);
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- frame_->EmitPush(r0);
- Result arg_count_register = allocator_->Allocate(r0);
- ASSERT(arg_count_register.is_valid());
- __ mov(arg_count_register.reg(), Operand(arg_count));
- Result result = frame_->InvokeBuiltin(native,
- CALL_JS,
- &arg_count_register,
- arg_count + 1);
+ Result result = allocator_->Allocate(r0);
+ ASSERT(result.is_valid());
__ cmp(result.reg(), Operand(0));
result.Unuse();
exit.Jump();
- // test smi equality by pointer comparison.
+ // Do smi comparisons by pointer comparison.
smi.Bind();
__ cmp(r1, Operand(r0));
@@ -1505,8 +1606,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Duplicate TOS.
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
- LoadAndSpill(clause->label());
- Comparison(eq, true);
+ Comparison(eq, NULL, clause->label(), true);
Branch(false, &next_test);
// Before entering the body from the test, remove the switch value from
@@ -2321,16 +2421,22 @@ void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- JumpTarget exit;
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
- Branch(false, &else_);
- then.Bind();
- LoadAndSpill(node->then_expression(), typeof_state());
- exit.Jump();
- else_.Bind();
- LoadAndSpill(node->else_expression(), typeof_state());
- exit.Bind();
+ if (has_valid_frame()) {
+ Branch(false, &else_);
+ }
+ if (has_valid_frame() || then.is_linked()) {
+ then.Bind();
+ LoadAndSpill(node->then_expression(), typeof_state());
+ }
+ if (else_.is_linked()) {
+ JumpTarget exit;
+ if (has_valid_frame()) exit.Jump();
+ else_.Bind();
+ LoadAndSpill(node->else_expression(), typeof_state());
+ if (exit.is_linked()) exit.Bind();
+ }
ASSERT(frame_->height() == original_height + 1);
}
@@ -3180,6 +3286,66 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
}
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ JumpTarget leave, null, function, non_function_constructor;
+
+ // Load the object into r0.
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+
+ // If the object is a smi, we return null.
+ __ tst(r0, Operand(kSmiTagMask));
+ null.Branch(eq);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+ __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+ null.Branch(lt);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+ function.Branch(eq);
+
+ // Check if the constructor in the map is a function.
+ __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
+ __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ non_function_constructor.Branch(ne);
+
+ // The r0 register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->EmitPush(r0);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ __ mov(r0, Operand(Factory::function_class_symbol()));
+ frame_->EmitPush(r0);
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ __ mov(r0, Operand(Factory::Object_symbol()));
+ frame_->EmitPush(r0);
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ __ mov(r0, Operand(Factory::null_value()));
+ frame_->EmitPush(r0);
+
+ // All done.
+ leave.Bind();
+}
+
+
 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 1);
@@ -3255,7 +3421,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
LoadAndSpill(args->at(0));
frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
cc_reg_ = eq;
}
@@ -3290,6 +3456,28 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 0);
+
+ // Get the frame pointer for the calling frame.
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r1, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ b(ne, &check_frame_marker);
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ cc_reg_ = eq;
+}
+
+
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 0);
@@ -3423,7 +3611,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
false_target(),
true_target(),
true);
- cc_reg_ = NegateCondition(cc_reg_);
+ // LoadCondition may (and usually does) leave a test and branch to
+ // be emitted by the caller. In that case, negate the condition.
+ if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
} else if (op == Token::DELETE) {
Property* property = node->expression()->AsProperty();
@@ -3495,8 +3685,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
case Token::SUB: {
bool overwrite =
- (node->AsBinaryOperation() != NULL &&
- node->AsBinaryOperation()->ResultOverwriteAllowed());
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
frame_->CallStub(&stub, 0);
break;
@@ -3960,34 +4150,34 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
return;
}
- LoadAndSpill(left);
- LoadAndSpill(right);
switch (op) {
case Token::EQ:
- Comparison(eq, false);
+ Comparison(eq, left, right, false);
break;
case Token::LT:
- Comparison(lt);
+ Comparison(lt, left, right);
break;
case Token::GT:
- Comparison(gt);
+ Comparison(gt, left, right);
break;
case Token::LTE:
- Comparison(le);
+ Comparison(le, left, right);
break;
case Token::GTE:
- Comparison(ge);
+ Comparison(ge, left, right);
break;
case Token::EQ_STRICT:
- Comparison(eq, true);
+ Comparison(eq, left, right, true);
break;
case Token::IN: {
+ LoadAndSpill(left);
+ LoadAndSpill(right);
Result arg_count = allocator_->Allocate(r0);
ASSERT(arg_count.is_valid());
__ mov(arg_count.reg(), Operand(1)); // not counting receiver
@@ -4000,6 +4190,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
case Token::INSTANCEOF: {
+ LoadAndSpill(left);
+ LoadAndSpill(right);
InstanceofStub stub;
Result result = frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
@@ -4291,7 +4483,7 @@ static void CountLeadingZeros(
__ add(zeros, zeros, Operand(2), LeaveCC, eq);
__ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
// Top bit.
- __ tst(scratch, Operand(0x80000000));
+ __ tst(scratch, Operand(0x80000000u));
__ add(zeros, zeros, Operand(1), LeaveCC, eq);
#endif
}
@@ -4443,7 +4635,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
// We test for the special value that has a different exponent. This test
// has the neat side effect of setting the flags according to the sign.
ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int_, Operand(0x80000000));
+ __ cmp(the_int_, Operand(0x80000000u));
__ b(eq, &max_negative_int);
// Set up the correct exponent in scratch_. All non-Smi int32s have the same.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
@@ -4482,6 +4674,408 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
}
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+ Label* slow,
+ Condition cc) {
+ Label not_identical;
+ __ cmp(r0, Operand(r1));
+  __ b(ne, &not_identical);
+
+ Register exp_mask_reg = r5;
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ Label heap_number, return_equal;
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == lt || cc == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, slow);
+ }
+ }
+ __ bind(&return_equal);
+ if (cc == lt) {
+ __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
+ } else if (cc == gt) {
+ __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
+ } else {
+ __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves.
+ }
+ __ mov(pc, Operand(lr)); // Return.
+
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ and_(r3, r2, Operand(exp_mask_reg));
+ __ cmp(r3, Operand(exp_mask_reg));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
+ // (it's a NaN). For <= and >= we need to load r0 with the failing value
+ // if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
+ }
+ __ mov(pc, Operand(lr)); // Return.
+ }
+ // No fall through here.
+
+  __ bind(&not_identical);
+}
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+ Label* rhs_not_nan,
+ Label* slow,
+ bool strict) {
+ Label lhs_is_smi;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &lhs_is_smi);
+
+ // Rhs is a Smi. Check whether the non-smi is a heap number.
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal (r0 is already not zero)
+ __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Rhs is a smi, lhs is a number.
+ __ push(lr);
+ __ mov(r7, Operand(r1));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ // r3 and r2 are rhs as double.
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ // We now have both loaded as doubles but we can skip the lhs nan check
+ // since it's a Smi.
+ __ pop(lr);
+ __ jmp(rhs_not_nan);
+
+ __ bind(&lhs_is_smi);
+ // Lhs is a Smi. Check whether the non-smi is a heap number.
+ __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ if (strict) {
+ // If lhs was not a number and rhs was a Smi then strict equality cannot
+ // succeed. Return non-equal.
+ __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal.
+ __ mov(pc, Operand(lr), LeaveCC, ne); // Return.
+ } else {
+ // Smi compared non-strictly with a non-Smi non-heap-number. Call
+ // the runtime.
+ __ b(ne, slow);
+ }
+
+ // Lhs is a smi, rhs is a number.
+ // r0 is Smi and r1 is heap number.
+ __ push(lr);
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ pop(lr);
+ // Fall through to both_loaded_as_doubles.
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register lhs_exponent = exp_first ? r0 : r1;
+ Register rhs_exponent = exp_first ? r2 : r3;
+ Register lhs_mantissa = exp_first ? r1 : r0;
+ Register rhs_mantissa = exp_first ? r3 : r2;
+ Label one_is_nan, neither_is_nan;
+
+ Register exp_mask_reg = r5;
+
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+ __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
+ __ cmp(r4, Operand(exp_mask_reg));
+ __ b(ne, rhs_not_nan);
+ __ mov(r4,
+ Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(rhs_mantissa, Operand(0));
+ __ b(ne, &one_is_nan);
+
+ __ bind(rhs_not_nan);
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+ __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
+ __ cmp(r4, Operand(exp_mask_reg));
+ __ b(ne, &neither_is_nan);
+ __ mov(r4,
+ Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
+ SetCC);
+ __ b(ne, &one_is_nan);
+ __ cmp(lhs_mantissa, Operand(0));
+ __ b(eq, &neither_is_nan);
+
+ __ bind(&one_is_nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in r0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
+ } else {
+ __ mov(r0, Operand(LESS));
+ }
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&neither_is_nan);
+}
+
+
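EmitNanCheck implements the standard IEEE-754 classification in two 32-bit halves: a double is NaN iff its exponent bits (52..62) are all set and its mantissa (bits 0..51) is non-zero; an all-ones exponent with a zero mantissa is an Infinity. The same test on the host, as a sanity check:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    static bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      const uint64_t kExpMask      = 0x7FF0000000000000ULL;  // bits 52..62
      const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;  // bits 0..51
      return (bits & kExpMask) == kExpMask && (bits & kMantissaMask) != 0;
    }

    int main() {
      assert(IsNaNBits(std::numeric_limits<double>::quiet_NaN()));
      assert(!IsNaNBits(std::numeric_limits<double>::infinity()));
      assert(!IsNaNBits(1.5));
      return 0;
    }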
+// See comment at call site.
+static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
+ bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
+ Register lhs_exponent = exp_first ? r0 : r1;
+ Register rhs_exponent = exp_first ? r2 : r3;
+ Register lhs_mantissa = exp_first ? r1 : r0;
+ Register rhs_mantissa = exp_first ? r3 : r2;
+
+ // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
+ if (cc == eq) {
+ // Doubles are not equal unless they have the same bit pattern.
+ // Exception: 0 and -0.
+ __ cmp(lhs_mantissa, Operand(rhs_mantissa));
+ __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne);
+ // Return non-zero if the numbers are unequal.
+ __ mov(pc, Operand(lr), LeaveCC, ne);
+
+ __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC);
+ // If exponents are equal then return 0.
+ __ mov(pc, Operand(lr), LeaveCC, eq);
+
+ // Exponents are unequal. The only way we can return that the numbers
+ // are equal is if one is -0 and the other is 0. We already dealt
+ // with the case where both are -0 or both are 0.
+ // We start by seeing if the mantissas (that are equal) or the bottom
+ // 31 bits of the rhs exponent are non-zero. If so we return not
+ // equal.
+ __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC);
+ __ mov(r0, Operand(r4), LeaveCC, ne);
+ __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally.
+ // Now they are equal if and only if the lhs exponent is zero in its
+ // low 31 bits.
+ __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize));
+ __ mov(pc, Operand(lr));
+ } else {
+ // Call a native function to do a comparison between two non-NaNs.
+ // Call C routine that may not cause GC or other trouble.
+ __ mov(r5, Operand(ExternalReference::compare_doubles()));
+ __ Jump(r5); // Tail call.
+ }
+}
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
+ // If either operand is a JSObject or an oddball value, then they are
+ // not equal since their pointers are different.
+ // There is no test for undetectability in strict equality.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label first_non_object;
+ // Get the type of the first operand into r2 and compare it with
+ // FIRST_JS_OBJECT_TYPE.
+ __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &first_non_object);
+
+ // Return non-zero (r0 is not zero)
+ Label return_not_equal;
+ __ bind(&return_not_equal);
+ __ mov(pc, Operand(lr)); // Return.
+
+ __ bind(&first_non_object);
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r2, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+
+ __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(ge, &return_not_equal);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ cmp(r3, Operand(ODDBALL_TYPE));
+ __ b(eq, &return_not_equal);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+ Label* both_loaded_as_doubles,
+ Label* not_heap_numbers,
+ Label* slow) {
+ __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, not_heap_numbers);
+ __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
+ __ b(ne, slow); // First was a heap number, second wasn't. Go slow case.
+
+ // Both are heap numbers. Load them up then jump to the code we have
+ // for that.
+ __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+ __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for symbol-to-symbol equality.
+static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
+ // r2 is object type of r0.
+ __ tst(r2, Operand(kIsNotStringMask));
+ __ b(ne, slow);
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(eq, slow);
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, slow);
+ __ tst(r3, Operand(kIsSymbolMask));
+ __ b(eq, slow);
+
+ // Both are symbols. We already checked they weren't the same pointer
+ // so they are not equal.
+ __ mov(r0, Operand(1)); // Non-zero indicates not equal.
+ __ mov(pc, Operand(lr)); // Return.
+}
+
+
+// On entry r0 and r1 are the things to be compared. On exit r0 is 0,
+// positive or negative to indicate the result of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles, rhs_not_nan;
+
+ // NOTICE! This code is only reached after a smi-fast-case check, so
+ // it is certain that at least one operand isn't a smi.
+
+ // Handle the case where the objects are identical. Either returns the answer
+ // or goes to slow. Only falls through if the objects were not identical.
+ EmitIdenticalObjectComparison(masm, &slow, cc_);
+
+ // If either is a Smi (we know that not both are), then they can only
+ // be strictly equal if the other is a HeapNumber.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ __ and_(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+  __ b(ne, &not_smis);
+ // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+ // 1) Return the answer.
+ // 2) Go to slow.
+ // 3) Fall through to both_loaded_as_doubles.
+ // 4) Jump to rhs_not_nan.
+ // In cases 3 and 4 we have found out we were dealing with a number-number
+ // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
+ EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_);
+
+ __ bind(&both_loaded_as_doubles);
+ // r0, r1, r2, r3 are the double representations of the left hand side
+ // and the right hand side.
+
+ // Checks for NaN in the doubles we have loaded. Can return the answer or
+ // fall through if neither is a NaN. Also binds rhs_not_nan.
+ EmitNanCheck(masm, &rhs_not_nan, cc_);
+
+ // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
+ // answer. Never falls through.
+ EmitTwoNonNanDoubleComparison(masm, cc_);
+
+  __ bind(&not_smis);
+ // At this point we know we are dealing with two different objects,
+ // and neither of them is a Smi. The objects are in r0 and r1.
+ if (strict_) {
+ // This returns non-equal for some object types, or falls through if it
+ // was not lucky.
+ EmitStrictTwoHeapObjectCompare(masm);
+ }
+
+ Label check_for_symbols;
+ // Check for heap-number-heap-number comparison. Can jump to slow case,
+ // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
+ // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // In this case r2 will contain the type of r0.
+ EmitCheckForTwoHeapNumbers(masm,
+ &both_loaded_as_doubles,
+ &check_for_symbols,
+ &slow);
+
+ __ bind(&check_for_symbols);
+ if (cc_ == eq) {
+ // Either jumps to slow or returns the answer. Assumes that r2 is the type
+ // of r0 on entry.
+ EmitCheckForSymbols(masm, &slow);
+ }
+
+ __ bind(&slow);
+ __ push(lr);
+ __ push(r1);
+ __ push(r0);
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ int arg_count = 1; // Not counting receiver.
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == lt || cc_ == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ncr = LESS;
+ }
+ arg_count++;
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
+ }
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ mov(r0, Operand(arg_count));
+ __ InvokeBuiltin(native, CALL_JS);
+ __ cmp(r0, Operand(0));
+ __ pop(pc);
+}
+
+
// Allocates a heap number or jumps to the label if the young space is full and
// a scavenge is needed.
static void AllocateHeapNumber(
@@ -4538,7 +5132,8 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
- ConvertToDoubleStub stub1(r3, r2, r0, r6);
+ __ mov(r7, Operand(r0));
+ ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
@@ -4854,6 +5449,85 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
}
+// Can we multiply by x with max two shifts and an add?
+// This answers yes to all integers from 2 to 10.
+static bool IsEasyToMultiplyBy(int x) {
+ if (x < 2) return false; // Avoid special cases.
+ if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows.
+ if (IsPowerOf2(x)) return true; // Simple shift.
+ if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift.
+ if (IsPowerOf2(x + 1)) return true; // Patterns like 11111.
+ return false;
+}
+
+
+// Can multiply by anything that IsEasyToMultiplyBy returns true for.
+// Source and destination may be the same register. This routine does
+// not set carry and overflow the way a mul instruction would.
+static void MultiplyByKnownInt(MacroAssembler* masm,
+ Register source,
+ Register destination,
+ int known_int) {
+ if (IsPowerOf2(known_int)) {
+ __ mov(destination, Operand(source, LSL, BitPosition(known_int)));
+ } else if (PopCountLessThanEqual2(known_int)) {
+ int first_bit = BitPosition(known_int);
+ int second_bit = BitPosition(known_int ^ (1 << first_bit));
+ __ add(destination, source, Operand(source, LSL, second_bit - first_bit));
+ if (first_bit != 0) {
+ __ mov(destination, Operand(destination, LSL, first_bit));
+ }
+ } else {
+ ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111.
+ int the_bit = BitPosition(known_int + 1);
+ __ rsb(destination, source, Operand(source, LSL, the_bit));
+ }
+}
+
+
+// This function (as opposed to MultiplyByKnownInt) takes the known int in
+// a register for the cases where it doesn't know a good trick, and may deliver
+// a result that needs shifting.
+static void MultiplyByKnownInt2(
+ MacroAssembler* masm,
+ Register result,
+ Register source,
+ Register known_int_register, // Smi tagged.
+ int known_int,
+ int* required_shift) { // Including Smi tag shift
+ switch (known_int) {
+ case 3:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 1;
+ break;
+ case 5:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 1;
+ break;
+ case 6:
+ __ add(result, source, Operand(source, LSL, 1));
+ *required_shift = 2;
+ break;
+ case 7:
+ __ rsb(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 9:
+ __ add(result, source, Operand(source, LSL, 3));
+ *required_shift = 1;
+ break;
+ case 10:
+ __ add(result, source, Operand(source, LSL, 2));
+ *required_shift = 2;
+ break;
+ default:
+ ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient.
+ __ mul(result, source, known_int_register);
+ *required_shift = 0;
+ }
+}
+
+
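The three decompositions MultiplyByKnownInt chooses between (single shift, shift-and-add, and the rsb-style 2^k - 1 pattern) are easy to sanity-check on the host with illustrative values:

    #include <cassert>

    int main() {
      unsigned x = 1234;
      assert(x * 8  == x << 3);               // power of two: one shift
      assert(x * 10 == (x + (x << 2)) << 1);  // two bits set: add, then shift
      assert(x * 7  == (x << 3) - x);         // 2^k - 1: rsb of a shift
      return 0;
    }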
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r1 : x
// r0 : y
@@ -4919,14 +5593,114 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ tst(r3, Operand(r3));
__ mov(r0, Operand(r3), LeaveCC, ne);
__ Ret(ne);
- // Slow case.
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ add(r2, r0, Operand(r1), SetCC);
+ __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ Ret(pl); // Return Smi 0 if the non-zero one was positive.
+ // Slow case. We fall through here if we multiplied a negative number
+ // with 0, because that would mean we should produce -0.
__ bind(&slow);
HandleBinaryOpSlowCases(masm,
                          &not_smi,
Builtins::MUL,
Token::MUL,
- mode_);
+ mode_);
+ break;
+ }
+
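The extra fall-through above exists because JavaScript multiplication follows IEEE-754: a zero result carries the sign of its operands, and -0 is not representable as a Smi, so 0 times a negative Smi must reach the slow case and allocate a heap number. A host-side illustration:

    #include <cassert>
    #include <cmath>

    int main() {
      assert(std::signbit(0.0 * -5.0));   // -0: needs a heap number in V8
      assert(!std::signbit(0.0 * 5.0));   // +0: representable as Smi 0
      return 0;
    }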
+ case Token::DIV:
+ case Token::MOD: {
+ Label not_smi;
+ if (specialized_on_rhs_) {
+ Label smi_is_unsuitable;
+ __ BranchOnNotSmi(r1, ¬_smi);
+ if (IsPowerOf2(constant_rhs_)) {
+ if (op_ == Token::MOD) {
+ __ and_(r0,
+ r1,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
+ SetCC);
+ // We now have the answer, but if the input was negative we also
+ // have the sign bit. Our work is done if the result is
+ // positive or zero:
+ __ Ret(pl);
+ // A mod of a negative left hand side must return a negative number.
+ // Unfortunately if the answer is 0 then we must return -0. And we
+ // already optimistically trashed r0 so we may need to restore it.
+ __ eor(r0, r0, Operand(0x80000000u), SetCC);
+ // Next two instructions are conditional on the answer being -0.
+ __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
+ __ b(eq, &smi_is_unsuitable);
+          // We need to subtract the dividend. E.g. -3 % 4 == -3.
+ __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_)));
+ } else {
+ ASSERT(op_ == Token::DIV);
+ __ tst(r1,
+ Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
+ __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder.
+ int shift = 0;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ d >>= 1;
+ shift++;
+ }
+ __ mov(r0, Operand(r1, LSR, shift));
+ __ bic(r0, r0, Operand(kSmiTagMask));
+ }
+ } else {
+ // Not a power of 2.
+ __ tst(r1, Operand(0x80000000u));
+ __ b(ne, &smi_is_unsuitable);
+ // Find a fixed point reciprocal of the divisor so we can divide by
+ // multiplying.
+ double divisor = 1.0 / constant_rhs_;
+ int shift = 32;
+ double scale = 4294967296.0; // 1 << 32.
+ uint32_t mul;
+ // Maximise the precision of the fixed point reciprocal.
+ while (true) {
+            mul = static_cast<uint32_t>(scale * divisor);
+ if (mul >= 0x7fffffff) break;
+ scale *= 2.0;
+ shift++;
+ }
+ mul++;
+ __ mov(r2, Operand(mul));
+ __ umull(r3, r2, r2, r1);
+ __ mov(r2, Operand(r2, LSR, shift - 31));
+ // r2 is r1 / rhs. r2 is not Smi tagged.
+ // r0 is still the known rhs. r0 is Smi tagged.
+          // r1 is still the unknown lhs. r1 is Smi tagged.
+ int required_r4_shift = 0; // Including the Smi tag shift of 1.
+ // r4 = r2 * r0.
+ MultiplyByKnownInt2(masm,
+ r4,
+ r2,
+ r0,
+ constant_rhs_,
+ &required_r4_shift);
+ // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs).
+ if (op_ == Token::DIV) {
+ __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC);
+ __ b(ne, &smi_is_unsuitable); // There was a remainder.
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ } else {
+ ASSERT(op_ == Token::MOD);
+ __ sub(r0, r1, Operand(r4, LSL, required_r4_shift));
+ }
+ }
+ __ Ret();
+ __ bind(&smi_is_unsuitable);
+ } else {
+        __ jmp(&not_smi);
+ }
+ HandleBinaryOpSlowCases(masm,
+                          &not_smi,
+ op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
+ op_,
+ mode_);
break;
}
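The reciprocal search above can be reproduced off-target. With mul chosen just above 2^shift / divisor, and shift grown until mul fills most of a 32-bit word, (n * mul) >> shift equals n / divisor throughout the Smi range; a host-side sketch (sampling part of that range):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int constant_rhs = 5;       // example non-power-of-2 divisor
      double divisor = 1.0 / constant_rhs;
      int shift = 32;
      double scale = 4294967296.0;      // 1 << 32
      uint32_t mul;
      while (true) {                    // maximise reciprocal precision
        mul = static_cast<uint32_t>(scale * divisor);
        if (mul >= 0x7fffffff) break;
        scale *= 2.0;
        shift++;
      }
      mul++;
      for (uint32_t n = 0; n < 2000000; n++) {
        uint32_t q = static_cast<uint32_t>(
            (static_cast<uint64_t>(n) * mul) >> shift);
        assert(q == n / constant_rhs);  // the umull + LSR sequence above
      }
      return 0;
    }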
@@ -4951,7 +5725,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ and_(r2, r2, Operand(0x1f));
__ mov(r0, Operand(r1, ASR, r2));
// Smi tag result.
- __ and_(r0, r0, Operand(~kSmiTagMask));
+ __ bic(r0, r0, Operand(kSmiTagMask));
break;
case Token::SHR:
// Remove tags from operands. We can't do this on a 31 bit number
@@ -5015,7 +5789,6 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void UnarySubStub::Generate(MacroAssembler* masm) {
Label undo;
Label slow;
- Label done;
Label not_smi;
// Enter runtime system if the value is not a smi.
@@ -5041,9 +5814,6 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
__ mov(r0, Operand(0)); // Set number of arguments.
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- __ bind(&done);
- __ StubReturn(1);
-
__ bind(¬_smi);
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
@@ -5203,9 +5973,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// support moving the C entry code stub. This should be fixed, but currently
// this is OK because the CEntryStub gets generated so early in the V8 boot
// sequence that it is not moving ever.
- __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
- __ push(lr);
- __ Jump(r5);
+ masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
+ masm->push(lr);
+ masm->Jump(r5);
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -5629,6 +6399,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+  ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
+  return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 4fab900166..6391a8eb9a 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -186,6 +186,8 @@ class CodeGenerator: public AstVisitor {
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+ static const int kUnknownIntValue = -1;
+
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle