Upgrade V8 to 1.2.14

v0.7.4-release
Ryan, 16 years ago
parent commit 88e9a5f122
100 changed files (changed-line counts in parentheses):

  1. deps/v8/ChangeLog (50)
  2. deps/v8/SConstruct (15)
  3. deps/v8/benchmarks/README.txt (5)
  4. deps/v8/benchmarks/deltablue.js (14)
  5. deps/v8/benchmarks/revisions.html (5)
  6. deps/v8/src/api.h (1)
  7. deps/v8/src/apinatives.js (1)
  8. deps/v8/src/arm/assembler-arm.cc (42)
  9. deps/v8/src/arm/codegen-arm.cc (965)
  10. deps/v8/src/arm/codegen-arm.h (17)
  11. deps/v8/src/arm/constants-arm.h (31)
  12. deps/v8/src/arm/disasm-arm.cc (48)
  13. deps/v8/src/arm/ic-arm.cc (35)
  14. deps/v8/src/arm/macro-assembler-arm.cc (27)
  15. deps/v8/src/arm/simulator-arm.cc (85)
  16. deps/v8/src/arm/stub-cache-arm.cc (465)
  17. deps/v8/src/assembler.cc (28)
  18. deps/v8/src/assembler.h (1)
  19. deps/v8/src/ast.h (13)
  20. deps/v8/src/bootstrapper.cc (65)
  21. deps/v8/src/code-stubs.cc (19)
  22. deps/v8/src/codegen.cc (2)
  23. deps/v8/src/codegen.h (32)
  24. deps/v8/src/compilation-cache.cc (1)
  25. deps/v8/src/d8-debug.h (2)
  26. deps/v8/src/d8-posix.cc (6)
  27. deps/v8/src/date-delay.js (231)
  28. deps/v8/src/debug-delay.js (66)
  29. deps/v8/src/debug.cc (22)
  30. deps/v8/src/debug.h (5)
  31. deps/v8/src/disassembler.cc (7)
  32. deps/v8/src/execution.cc (2)
  33. deps/v8/src/factory.cc (45)
  34. deps/v8/src/factory.h (23)
  35. deps/v8/src/flag-definitions.h (3)
  36. deps/v8/src/frames-inl.h (2)
  37. deps/v8/src/globals.h (17)
  38. deps/v8/src/handles.cc (28)
  39. deps/v8/src/heap-inl.h (19)
  40. deps/v8/src/heap.cc (488)
  41. deps/v8/src/heap.h (295)
  42. deps/v8/src/ia32/assembler-ia32.cc (14)
  43. deps/v8/src/ia32/assembler-ia32.h (9)
  44. deps/v8/src/ia32/codegen-ia32.cc (137)
  45. deps/v8/src/ia32/codegen-ia32.h (6)
  46. deps/v8/src/ia32/ic-ia32.cc (30)
  47. deps/v8/src/ia32/stub-cache-ia32.cc (519)
  48. deps/v8/src/ic.cc (116)
  49. deps/v8/src/interpreter-irregexp.cc (64)
  50. deps/v8/src/jsregexp.cc (116)
  51. deps/v8/src/jsregexp.h (11)
  52. deps/v8/src/log.cc (5)
  53. deps/v8/src/macro-assembler.h (2)
  54. deps/v8/src/macros.py (21)
  55. deps/v8/src/mark-compact.cc (185)
  56. deps/v8/src/mark-compact.h (10)
  57. deps/v8/src/math.js (26)
  58. deps/v8/src/messages.js (288)
  59. deps/v8/src/mirror-delay.js (2)
  60. deps/v8/src/objects-debug.cc (99)
  61. deps/v8/src/objects-inl.h (132)
  62. deps/v8/src/objects.cc (1119)
  63. deps/v8/src/objects.h (687)
  64. deps/v8/src/parser.cc (292)
  65. deps/v8/src/platform-freebsd.cc (1)
  66. deps/v8/src/platform-linux.cc (1)
  67. deps/v8/src/platform-macos.cc (153)
  68. deps/v8/src/platform-win32.cc (16)
  69. deps/v8/src/platform.h (7)
  70. deps/v8/src/property.cc (14)
  71. deps/v8/src/property.h (101)
  72. deps/v8/src/regexp-delay.js (2)
  73. deps/v8/src/runtime.cc (249)
  74. deps/v8/src/runtime.h (10)
  75. deps/v8/src/runtime.js (27)
  76. deps/v8/src/scopes.cc (107)
  77. deps/v8/src/scopes.h (58)
  78. deps/v8/src/serialize.cc (92)
  79. deps/v8/src/serialize.h (3)
  80. deps/v8/src/spaces-inl.h (23)
  81. deps/v8/src/spaces.cc (354)
  82. deps/v8/src/spaces.h (157)
  83. deps/v8/src/string-stream.cc (9)
  84. deps/v8/src/string.js (10)
  85. deps/v8/src/stub-cache.cc (98)
  86. deps/v8/src/stub-cache.h (136)
  87. deps/v8/src/unicode.cc (13)
  88. deps/v8/src/v8-counters.h (6)
  89. deps/v8/src/v8natives.js (22)
  90. deps/v8/src/variables.h (14)
  91. deps/v8/src/version.cc (2)
  92. deps/v8/src/virtual-frame.h (2)
  93. deps/v8/src/x64/assembler-x64.cc (201)
  94. deps/v8/src/x64/assembler-x64.h (72)
  95. deps/v8/src/x64/codegen-x64.cc (791)
  96. deps/v8/src/x64/codegen-x64.h (68)
  97. deps/v8/src/x64/debug-x64.cc (6)
  98. deps/v8/src/x64/disasm-x64.cc (1398)
  99. deps/v8/src/x64/frames-x64.h (7)
  100. deps/v8/src/x64/ic-x64.cc (6)

deps/v8/ChangeLog (50)

@ -1,3 +1,53 @@
2009-07-13: Version 1.2.14

        Added separate paged heap space for global property cells and
        avoided updating the write barrier when storing into them.

        Improved peep-hole optimization on ARM platforms by not emitting
        unnecessary debug information.

        Re-enabled ICs for loads and calls that skip a global object
        during lookup through the prototype chain.

        Allowed access through global proxies to use ICs.

        Fixed issue 401.

2009-07-09: Version 1.2.13

        Fixed issue 397, issue 398, and issue 399.

        Added support for breakpoint groups.

        Fixed bugs introduced with the new global object representation.

        Fixed a few bugs in the ARM code generator.

2009-07-06: Version 1.2.12

        Added stack traces collection to Error objects accessible through
        the e.stack property.

        Changed RegExp parser to use a recursive data structure instead of
        stack-based recursion.

        Optimized Date object construction and string concatenation.

        Improved performance of div, mod, and mul on ARM platforms.

2009-07-02: Version 1.2.11

        Improved performance on IA-32 and ARM.

        Fixed profiler sampler implementation on Mac OS X.

        Changed the representation of global objects to improve
        performance of adding a lot of new properties.

2009-06-29: Version 1.2.10

        Improved debugger support.
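For context on the 1.2.12 entry above: the collected trace is exposed as a
string-valued e.stack property, and the bootstrapper.cc hunk further down
installs an Error.stackTraceLimit property backed by the stack_trace_limit
flag. A minimal sketch of the API, assuming a d8-style shell with print();
the limit value 10 is an arbitrary example:

    // Cap how many frames V8 collects per Error
    // (the property is installed by bootstrapper.cc below).
    Error.stackTraceLimit = 10;

    function inner() { throw new Error('boom'); }
    function outer() { inner(); }

    try {
      outer();
    } catch (e) {
      print(e.stack);  // message line followed by one line per stack frame
    }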

deps/v8/SConstruct (15)

@ -95,7 +95,12 @@ ANDROID_LINKFLAGS = ['-nostdlib',
LIBRARY_FLAGS = {
'all': {
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CPPPATH': [join(root_dir, 'src')]
'CPPPATH': [join(root_dir, 'src')],
'regexp:native': {
'arch:ia32' : {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
}
}
},
'gcc': {
'all': {
@ -167,6 +172,7 @@ LIBRARY_FLAGS = {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'arch:x64': {
'CCFLAGS': ['-fno-strict-aliasing'],
'CPPDEFINES': ['V8_TARGET_ARCH_X64']
},
'prof:oprofile': {
@ -546,6 +552,11 @@ SIMPLE_OPTIONS = {
'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
'regexp': {
'values': ['native', 'interpreted'],
'default': 'native',
'help': 'Whether to use native or interpreted regexp implementation'
},
'snapshot': {
'values': ['on', 'off', 'nobuild'],
'default': 'off',
@ -677,6 +688,8 @@ def VerifyOptions(env):
return False
if not IsLegal(env, 'sample', ["shell", "process"]):
return False
if not IsLegal(env, 'regexp', ["native", "interpreted"]):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':

deps/v8/benchmarks/README.txt (5)

@ -57,4 +57,7 @@ of the benchmark.
Changes from Version 4 to Version 5
===================================
Removed duplicate line in random seed code.
Removed duplicate line in random seed code, and changed the name of
the Object.prototype.inherits function in the DeltaBlue benchmark to
inheritsFrom to avoid name clashes when running in Chromium with
extensions enabled.

deps/v8/benchmarks/deltablue.js (14)

@ -46,7 +46,7 @@ var DeltaBlue = new BenchmarkSuite('DeltaBlue', 71104, [
/* --- O b j e c t M o d e l --- */
Object.prototype.inherits = function (shuper) {
Object.prototype.inheritsFrom = function (shuper) {
function Inheriter() { }
Inheriter.prototype = shuper.prototype;
this.prototype = new Inheriter();
@ -216,7 +216,7 @@ function UnaryConstraint(v, strength) {
this.addConstraint();
}
UnaryConstraint.inherits(Constraint);
UnaryConstraint.inheritsFrom(Constraint);
/**
* Adds this constraint to the constraint graph
@ -294,7 +294,7 @@ function StayConstraint(v, str) {
StayConstraint.superConstructor.call(this, v, str);
}
StayConstraint.inherits(UnaryConstraint);
StayConstraint.inheritsFrom(UnaryConstraint);
StayConstraint.prototype.execute = function () {
// Stay constraints do nothing
@ -312,7 +312,7 @@ function EditConstraint(v, str) {
EditConstraint.superConstructor.call(this, v, str);
}
EditConstraint.inherits(UnaryConstraint);
EditConstraint.inheritsFrom(UnaryConstraint);
/**
* Edits indicate that a variable is to be changed by imperative code.
@ -346,7 +346,7 @@ function BinaryConstraint(var1, var2, strength) {
this.addConstraint();
}
BinaryConstraint.inherits(Constraint);
BinaryConstraint.inheritsFrom(Constraint);
/**
* Decides if this constraint can be satisfied and which way it
@ -459,7 +459,7 @@ function ScaleConstraint(src, scale, offset, dest, strength) {
ScaleConstraint.superConstructor.call(this, src, dest, strength);
}
ScaleConstraint.inherits(BinaryConstraint);
ScaleConstraint.inheritsFrom(BinaryConstraint);
/**
* Adds this constraint to the constraint graph.
@ -515,7 +515,7 @@ function EqualityConstraint(var1, var2, strength) {
EqualityConstraint.superConstructor.call(this, var1, var2, strength);
}
EqualityConstraint.inherits(BinaryConstraint);
EqualityConstraint.inheritsFrom(BinaryConstraint);
/**
* Enforce this constraint. Assume that it is satisfied.
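The rename is purely mechanical; the helper's behavior is unchanged. A
self-contained sketch of the pattern, reconstructed from the calls shown in
this diff (Base and Derived are hypothetical names, not part of the
benchmark; print() assumes a d8-style shell):

    // Prototype-chaining helper under its new, clash-free name.
    Object.prototype.inheritsFrom = function (shuper) {
      function Inheriter() { }
      Inheriter.prototype = shuper.prototype;
      this.prototype = new Inheriter();
      this.superConstructor = shuper;
    };

    function Base(x) { this.x = x; }
    function Derived(x) { Derived.superConstructor.call(this, x); }
    Derived.inheritsFrom(Base);

    var d = new Derived(42);
    print(d.x);                // 42
    print(d instanceof Base);  // true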

deps/v8/benchmarks/revisions.html (5)

@ -22,7 +22,10 @@ the benchmark suite.
<div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.html">link</a>)</h3></div>
<p>Removed a duplicate line in the base random seed code.
<p>Removed duplicate line in random seed code, and changed the name of
the Object.prototype.inherits function in the DeltaBlue benchmark to
inheritsFrom to avoid name clashes when running in Chromium with
extensions enabled.
</p>
<div class="subtitle"><h3>Version 4 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v4/run.html">link</a>)</h3></div>

deps/v8/src/api.h (1)

@ -246,6 +246,7 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom(
#define MAKE_TO_LOCAL(Name, From, To) \
Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
ASSERT(!obj->IsTheHole()); \
return Local<To>(reinterpret_cast<To*>(obj.location())); \
}

deps/v8/src/apinatives.js (1)

@ -51,6 +51,7 @@ function Instantiate(data, name) {
var Constructor = %GetTemplateField(data, kApiConstructorOffset);
var result = Constructor ? new (Instantiate(Constructor))() : {};
ConfigureTemplateInstance(result, data);
result = %ToFastProperties(result);
return result;
default:
throw 'Unknown API tag <' + tag + '>';
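The %-prefixed calls here (%GetTemplateField, %ToFastProperties) are V8
runtime entry points that the natives scripts may call directly; ordinary
scripts only parse this syntax behind the --allow-natives-syntax flag. A
hedged sketch of what the added line accomplishes (the object and loop are
illustrative only):

    // Run with: d8 --allow-natives-syntax
    // An object grown one property at a time may fall back to slow
    // (dictionary) properties; %ToFastProperties asks V8 to convert it
    // back to the fast, fixed-layout representation.
    var obj = {};
    for (var i = 0; i < 100; i++) obj['p' + i] = i;
    obj = %ToFastProperties(obj);  // returns the (possibly converted) object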

deps/v8/src/arm/assembler-arm.cc (42)

@ -491,6 +491,20 @@ static bool fits_shifter(uint32_t imm32,
}
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
return Serializer::enabled();
} else if (rmode == RelocInfo::NONE) {
return false;
}
return true;
}
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
@ -501,8 +515,7 @@ void Assembler::addrmod1(Instr instr,
// immediate
uint32_t rotate_imm;
uint32_t immed_8;
if ((x.rmode_ != RelocInfo::NONE &&
x.rmode_ != RelocInfo::EXTERNAL_REFERENCE) ||
if (MustUseIp(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
@ -684,6 +697,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
void Assembler::blx(int branch_offset) { // v5 and above
WriteRecordedPositions();
ASSERT((branch_offset & 1) == 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
@ -693,12 +707,14 @@ void Assembler::blx(int branch_offset) { // v5 and above
void Assembler::blx(Register target, Condition cond) { // v5 and above
WriteRecordedPositions();
ASSERT(!target.is(pc));
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
}
void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
WriteRecordedPositions();
ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
}
@ -797,6 +813,9 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2,
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
}
addrmod1(cond | 13*B21 | s, r0, dst, src);
}
@ -816,7 +835,6 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
ASSERT(!dst.is(src1));
emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@ -825,7 +843,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::mul(Register dst, Register src1, Register src2,
SBit s, Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dst.is(src1));
// dst goes in bits 16-19 for this instruction!
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
@ -837,7 +855,7 @@ void Assembler::smlal(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
ASSERT(!dstL.is(dstH));
emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@ -850,7 +868,7 @@ void Assembler::smull(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
ASSERT(!dstL.is(dstH));
emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@ -863,7 +881,7 @@ void Assembler::umlal(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
ASSERT(!dstL.is(dstH));
emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@ -876,7 +894,7 @@ void Assembler::umull(Register dstL,
SBit s,
Condition cond) {
ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
ASSERT(!dstL.is(dstH));
emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
src2.code()*B8 | B7 | B4 | src1.code());
}
@ -906,8 +924,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// immediate
uint32_t rotate_imm;
uint32_t immed_8;
if ((src.rmode_ != RelocInfo::NONE &&
src.rmode_ != RelocInfo::EXTERNAL_REFERENCE)||
if (MustUseIp(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// immediate operand cannot be encoded, load it first to register ip
RecordRelocInfo(src.rmode_, src.imm32_);
@ -926,6 +943,9 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
// Load/Store instructions
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
if (dst.is(pc)) {
WriteRecordedPositions();
}
addrmod2(cond | B26 | L, dst, src);
// Eliminate pattern: push(r), pop(r)
@ -1263,7 +1283,6 @@ void Assembler::RecordPosition(int pos) {
if (pos == RelocInfo::kNoPosition) return;
ASSERT(pos >= 0);
current_position_ = pos;
WriteRecordedPositions();
}
@ -1271,7 +1290,6 @@ void Assembler::RecordStatementPosition(int pos) {
if (pos == RelocInfo::kNoPosition) return;
ASSERT(pos >= 0);
current_statement_position_ = pos;
WriteRecordedPositions();
}

deps/v8/src/arm/codegen-arm.cc (965)

File diff suppressed because it is too large

deps/v8/src/arm/codegen-arm.h (17)

@ -186,6 +186,8 @@ class CodeGenerator: public AstVisitor {
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
static const int kUnknownIntValue = -1;
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@ -291,8 +293,13 @@ class CodeGenerator: public AstVisitor {
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
void Comparison(Condition cc, bool strict = false);
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int known_rhs = kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
bool strict = false);
void SmiOperation(Token::Value op,
Handle<Object> value,
@ -333,11 +340,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the value field of an object (used by Date).
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);

deps/v8/src/arm/constants-arm.h (31)

@ -36,6 +36,19 @@
# define USE_ARM_EABI 1
#endif
// This means that interwork-compatible jump instructions are generated. We
// want to generate them on the simulator too so it makes snapshots that can
// be used on real hardware.
#if defined(__THUMB_INTERWORK__) || !defined(__arm__)
# define USE_THUMB_INTERWORK 1
#endif
// Simulator should support ARM5 instructions.
#if !defined(__arm__)
# define __ARM_ARCH_5__ 1
# define __ARM_ARCH_5T__ 1
#endif
namespace assembler {
namespace arm {
@ -97,6 +110,24 @@ enum Opcode {
};
// Some special instructions encoded as a TEQ with S=0 (bit 20).
enum Opcode9Bits {
BX = 1,
BXJ = 2,
BLX = 3,
BKPT = 7
};
// Some special instructions encoded as a CMN with S=0 (bit 20).
enum Opcode11Bits {
CLZ = 1
};
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
no_shift = -1,

deps/v8/src/arm/disasm-arm.cc (48)

@ -438,6 +438,18 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
// The manual gets the meaning of bit 22 backwards in the multiply
// instruction overview on page A3.16.2. The instructions that
// exist in u and s variants are the following:
// smull A4.1.87
// umull A4.1.129
// umlal A4.1.128
// smlal A4.1.76
// For these 0 means u and 1 means s. As can be seen on their individual
// pages. The other 18 mul instructions have the bit set or unset in
// arbitrary ways that are unrelated to the signedness of the instruction.
// None of these 18 instructions exist in both a 'u' and an 's' variant.
if (instr->Bit(22) == 0) {
Print("u");
} else {
@ -494,12 +506,25 @@ void Decoder::DecodeType01(Instr* instr) {
// multiply instructions
if (instr->Bit(23) == 0) {
if (instr->Bit(21) == 0) {
Format(instr, "mul'cond's 'rd, 'rm, 'rs");
// The MUL instruction description (A 4.1.33) refers to Rd as being
// the destination for the operation, but it confusingly uses the
// Rn field to encode it.
Format(instr, "mul'cond's 'rn, 'rm, 'rs");
} else {
Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
// The MLA instruction description (A 4.1.28) refers to the order
// of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
// Rn field to encode the Rd register and the Rd field to encode
// the Rn register.
Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
}
} else {
Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
// The signed/long multiply instructions use the terms RdHi and RdLo
// when referring to the target registers. They are mapped to the Rn
// and Rd fields as follows:
// RdLo == Rd field
// RdHi == Rn field
// The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs>
Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
}
} else {
Unknown(instr); // not used by V8
@ -593,7 +618,17 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "teq'cond 'rn, 'shift_op");
} else {
switch (instr->Bits(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
break;
case BLX:
Format(instr, "blx'cond 'rm");
break;
default:
Unknown(instr); // not used by V8
break;
}
}
break;
}
@ -609,7 +644,14 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "cmn'cond 'rn, 'shift_op");
} else {
switch (instr->Bits(7, 4)) {
case CLZ:
Format(instr, "clz'cond 'rd, 'rm");
break;
default:
Unknown(instr); // not used by V8
break;
}
}
break;
}

deps/v8/src/arm/ic-arm.cc (35)

@ -67,11 +67,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Load the map into t0.
__ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
// Test the has_named_interceptor bit in the map.
__ ldr(t0, FieldMemOperand(t1, Map::kInstanceAttributesOffset));
__ tst(t0, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
__ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
__ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
// Jump to miss if the interceptor bit is set.
__ b(ne, miss);
// Bail out if we have a JS global proxy object.
__ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, miss);
// Check that the properties array is a dictionary.
__ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
@ -81,13 +85,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Compute the capacity mask.
const int kCapacityOffset =
Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
__ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
__ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
__ sub(r3, r3, Operand(1));
const int kElementsStartOffset =
Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes
@ -98,12 +102,12 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
__ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
__ mov(t1, Operand(t1, LSR, String::kHashShift));
if (i > 0) {
__ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
__ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i)));
}
__ and_(t1, t1, Operand(r3));
// Scale the index by multiplying by the element size.
ASSERT(Dictionary::kElementSize == 3);
ASSERT(StringDictionary::kEntrySize == 3);
__ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3
// Check if the key is identical to the name.
@ -188,11 +192,14 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// -- [sp] : receiver
// -----------------------------------
// NOTE: Right now, this code always misses on ARM which is
// sub-optimal. We should port the fast case code from IA-32.
Label miss;
// Load receiver.
__ ldr(r0, MemOperand(sp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@ -213,7 +220,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@ -250,7 +257,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
@ -418,7 +425,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, r0, r2, r3);
StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
@ -757,7 +764,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, r1, r2, r3);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));

deps/v8/src/arm/macro-assembler-arm.cc (27)

@ -46,14 +46,14 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// We always generate arm code, never thumb code, even if V8 is compiled to
// thumb, so we require inter-working support
#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK)
#error "flag -mthumb-interwork missing"
#endif
// We do not support thumb inter-working with an arm architecture not supporting
// the blx instruction (below v5t)
#if defined(__THUMB_INTERWORK__)
#if defined(USE_THUMB_INTERWORK)
#if !defined(__ARM_ARCH_5T__) && \
!defined(__ARM_ARCH_5TE__) && \
!defined(__ARM_ARCH_7A__) && \
@ -65,12 +65,12 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
// Using blx may yield better code, so use it when required or when available
#if defined(__THUMB_INTERWORK__) || defined(__ARM_ARCH_5__)
#if defined(USE_THUMB_INTERWORK) || defined(__ARM_ARCH_5__)
#define USE_BLX 1
#endif
// Using bx does not yield better code, so use it only when required
#if defined(__THUMB_INTERWORK__)
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
#endif
@ -290,11 +290,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Align the stack at this point. After this point we have 5 pushes,
// so in fact we have to unalign here! See also the assert on the
// alignment immediately below.
if (OS::ActivationFrameAlignment() != kPointerSize) {
#if defined(V8_HOST_ARCH_ARM)
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
int activation_frame_alignment = OS::ActivationFrameAlignment();
#else // defined(V8_HOST_ARCH_ARM)
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so we will always align at
// this point here.
int activation_frame_alignment = 2 * kPointerSize;
#endif // defined(V8_HOST_ARCH_ARM)
if (activation_frame_alignment != kPointerSize) {
// This code needs to be made more general if this assert doesn't hold.
ASSERT(OS::ActivationFrameAlignment() == 2 * kPointerSize);
ASSERT(activation_frame_alignment == 2 * kPointerSize);
mov(r7, Operand(Smi::FromInt(0)));
tst(sp, Operand(OS::ActivationFrameAlignment() - 1));
tst(sp, Operand(activation_frame_alignment - 1));
push(r7, eq); // Conditional push instruction.
}

deps/v8/src/arm/simulator-arm.cc (85)

@ -1046,6 +1046,9 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
int64_t result = target(arg0, arg1, arg2, arg3);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08x\n", lo_res);
}
set_register(r0, lo_res);
set_register(r1, hi_res);
set_register(r0, result);
@ -1077,41 +1080,63 @@ void Simulator::DecodeType01(Instr* instr) {
// multiply instruction or extra loads and stores
if (instr->Bits(7, 4) == 9) {
if (instr->Bit(24) == 0) {
// multiply instructions
int rd = instr->RdField();
// Raw field decoding here. Multiply instructions have their Rd in
// funny places.
int rn = instr->RnField();
int rm = instr->RmField();
int rs = instr->RsField();
int32_t rs_val = get_register(rs);
int32_t rm_val = get_register(rm);
if (instr->Bit(23) == 0) {
if (instr->Bit(21) == 0) {
// Format(instr, "mul'cond's 'rd, 'rm, 'rs");
// The MUL instruction description (A 4.1.33) refers to Rd as being
// the destination for the operation, but it confusingly uses the
// Rn field to encode it.
// Format(instr, "mul'cond's 'rn, 'rm, 'rs");
int rd = rn; // Remap the rn field to the Rd register.
int32_t alu_out = rm_val * rs_val;
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
}
} else {
Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
// The MLA instruction description (A 4.1.28) refers to the order
// of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the
// Rn field to encode the Rd register and the Rd field to encode
// the Rn register.
Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd");
}
} else {
// Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
int rn = instr->RnField();
// The signed/long multiply instructions use the terms RdHi and RdLo
// when referring to the target registers. They are mapped to the Rn
// and Rd fields as follows:
// RdLo == Rd
// RdHi == Rn (This is confusingly stored in variable rd here
// because the mul instruction from above uses the
// Rn field to encode the Rd register. Good luck figuring
// this out without reading the ARM instruction manual
// at a very detailed level.)
// Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm");
int rd_hi = rn; // Remap the rn field to the RdHi register.
int rd_lo = instr->RdField();
int32_t hi_res = 0;
int32_t lo_res = 0;
if (instr->Bit(22) == 0) {
// signed multiply
UNIMPLEMENTED();
if (instr->Bit(22) == 1) {
int64_t left_op = static_cast<int32_t>(rm_val);
int64_t right_op = static_cast<int32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xffffffff);
} else {
// unsigned multiply
uint64_t left_op = rm_val;
uint64_t right_op = rs_val;
uint64_t left_op = static_cast<uint32_t>(rm_val);
uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xffffffff);
}
set_register(rn, hi_res);
set_register(rd, lo_res);
set_register(rd_lo, lo_res);
set_register(rd_hi, hi_res);
if (instr->HasS()) {
UNIMPLEMENTED();
}
@ -1357,8 +1382,22 @@ void Simulator::DecodeType01(Instr* instr) {
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
ASSERT(type == 0);
int rm = instr->RmField();
switch (instr->Bits(7, 4)) {
case BX:
set_pc(get_register(rm));
break;
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
set_register(lr, old_pc + Instr::kInstrSize);
break;
}
default:
UNIMPLEMENTED();
}
}
break;
}
@ -1381,8 +1420,28 @@ void Simulator::DecodeType01(Instr* instr) {
Format(instr, "cmn'cond 'rn, 'shift_rm");
Format(instr, "cmn'cond 'rn, 'imm");
} else {
ASSERT(type == 0);
int rm = instr->RmField();
int rd = instr->RdField();
switch (instr->Bits(7, 4)) {
case CLZ: {
uint32_t bits = get_register(rm);
int leading_zeros = 0;
if (bits == 0) {
leading_zeros = 32;
} else {
while ((bits & 0x80000000u) == 0) {
bits <<= 1;
leading_zeros++;
}
}
set_register(rd, leading_zeros);
break;
}
default:
UNIMPLEMENTED();
}
}
break;
}

deps/v8/src/arm/stub-cache-arm.cc (465)

@ -85,7 +85,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch) {
Register scratch,
Register extra) {
Label miss;
// Make sure that code is valid. The shifting code relies on the
@ -170,110 +171,6 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
void StubCompiler::GenerateLoadField(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss_label);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
GenerateFastPropertyLoad(masm, r0, reg, holder, index);
__ Ret();
}
void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss_label);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Return the constant value.
__ mov(r0, Operand(Handle<Object>(value)));
__ Ret();
}
void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register name,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss_label);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data
__ push(ip);
__ push(name); // name
__ push(reg); // holder
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 4);
}
void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
Register scratch2,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss_label);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name); // name
__ mov(scratch1, Operand(lookup_hint));
__ push(scratch1);
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
__ TailCallRuntime(load_ic_property, 4);
}
void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
@ -350,6 +247,17 @@ void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
}
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(r0, scratch1);
__ Ret();
}
// Generate StoreField code, value is passed in r0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
@ -461,6 +369,147 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#define __ ACCESS_MASM(masm())
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch,
String* name,
Label* miss) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed.
while (object != holder) {
if (object->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(object);
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) {
set_failure(Failure::cast(probe));
return result;
}
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(Handle<Object>(cell)));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ cmp(scratch, Operand(Factory::the_hole_value()));
__ b(ne, miss);
}
object = JSObject::cast(object->GetPrototype());
}
// Return the register containing the holder.
return result;
}
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
}
void StubCompiler::GenerateLoadConstant(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
// Return the constant value.
__ mov(r0, Operand(Handle<Object>(value)));
__ Ret();
}
void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
__ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data
__ push(ip);
__ push(name_reg); // name
__ push(reg); // holder
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 4);
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
Smi* lookup_hint,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ tst(receiver, Operand(kSmiTagMask));
__ b(eq, miss);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name_reg); // name
__ mov(scratch1, Operand(lookup_hint));
__ push(scratch1);
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
__ TailCallRuntime(load_ic_property, 4);
}
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
// ----------- S t a t e -------------
// -- r1: function
@ -496,9 +545,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
String* name,
Code::Flags flags) {
ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
String* name) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@ -514,7 +561,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
// Check that the function really is a function.
@ -540,16 +587,15 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCodeWithFlags(flags, name);
return GetCode(FIELD, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
CheckType check,
Code::Flags flags) {
ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
@ -572,7 +618,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
switch (check) {
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
__ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@ -590,8 +636,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
r2);
__ CheckMaps(JSObject::cast(object->GetPrototype()),
r2, holder, r3, r1, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
r1, name, &miss);
break;
case NUMBER_CHECK: {
@ -606,8 +652,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
r2);
__ CheckMaps(JSObject::cast(object->GetPrototype()),
r2, holder, r3, r1, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
r1, name, &miss);
break;
}
@ -623,13 +669,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
r2);
__ CheckMaps(JSObject::cast(object->GetPrototype()),
r2, holder, r3, r1, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
r1, name, &miss);
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
__ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
// Make sure object->elements()->map() != Heap::hash_table_map()
// Get the elements array of the object.
__ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
@ -648,6 +694,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
@ -663,7 +710,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCodeWithFlags(flags, function_name);
return GetCode(CONSTANT_FUNCTION, function_name);
}
@ -687,6 +734,72 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
// -- lr: return address
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
// Get the number of arguments.
const int argc = arguments().immediate();
// Get the receiver from the stack.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);
}
// Check that the maps haven't changed.
CheckPrototypes(object, r0, holder, r3, r2, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
__ cmp(r1, Operand(Handle<JSFunction>(function)));
__ b(ne, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
// Setup the context (function already in r1).
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
// Handle call cache miss.
__ bind(&miss);
__ DecrementCounter(&Counters::call_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, name);
}
Object* StoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@ -827,6 +940,43 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
// Check that the map of the global has not changed.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
// Store the value in the cell.
__ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
__ Ret();
// Handle store cache miss.
__ bind(&miss);
__ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, name);
}
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
@ -840,7 +990,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
GenerateLoadField(masm(), object, holder, r0, r3, r1, index, &miss);
GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -861,7 +1011,7 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
Label miss;
__ ldr(r0, MemOperand(sp, 0));
GenerateLoadCallback(masm(), object, holder, r0, r2, r3, r1, callback, &miss);
GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -883,7 +1033,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
GenerateLoadConstant(masm(), object, holder, r0, r3, r1, value, &miss);
GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -904,14 +1054,14 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
__ ldr(r0, MemOperand(sp, 0));
GenerateLoadInterceptor(masm(),
object,
GenerateLoadInterceptor(object,
holder,
holder->InterceptorPropertyLookupHint(name),
r0,
r2,
r3,
r1,
name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -921,6 +1071,56 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
// Get the receiver from the stack.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss);
}
// Check that the map of the global has not changed.
CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
__ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ cmp(r0, Operand(Factory::the_hole_value()));
__ b(eq, &miss);
}
__ Ret();
__ bind(&miss);
__ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NORMAL, name);
}
// TODO(1224671): IC stubs for keyed loads have not been implemented
// for ARM.
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
@ -940,7 +1140,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadField(masm(), receiver, holder, r0, r3, r1, index, &miss);
GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -965,8 +1165,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadCallback(masm(), receiver, holder, r0, r2, r3,
r1, callback, &miss);
GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -992,7 +1191,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadConstant(masm(), receiver, holder, r0, r3, r1, value, &miss);
GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1018,14 +1217,14 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ cmp(r2, Operand(Handle<String>(name)));
__ b(ne, &miss);
GenerateLoadInterceptor(masm(),
receiver,
GenerateLoadInterceptor(receiver,
holder,
Smi::FromInt(JSObject::kLookupInHolder),
r0,
r2,
r3,
r1,
name,
&miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

deps/v8/src/assembler.cc (28)

@ -608,6 +608,22 @@ static double mul_two_doubles(double x, double y) {
}
static double div_two_doubles(double x, double y) {
return x / y;
}
static double mod_two_doubles(double x, double y) {
return fmod(x, y);
}
static int native_compare_doubles(double x, double y) {
if (x == y) return 0;
return x < y ? 1 : -1;
}
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation) {
typedef double BinaryFPOperation(double x, double y);
@ -622,6 +638,12 @@ ExternalReference ExternalReference::double_fp_operation(
case Token::MUL:
function = &mul_two_doubles;
break;
case Token::DIV:
function = &div_two_doubles;
break;
case Token::MOD:
function = &mod_two_doubles;
break;
default:
UNREACHABLE();
}
@ -630,6 +652,12 @@ ExternalReference ExternalReference::double_fp_operation(
}
ExternalReference ExternalReference::compare_doubles() {
return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
false));
}
ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
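The new MOD entry routes Token::MOD through C's fmod, which matches the
semantics JavaScript's % operator needs for doubles: the result keeps the
sign of the dividend. A quick sketch of the correspondence, assuming a
d8-style print():

    print(5.5 % 2);   // 1.5,  same as fmod(5.5, 2)
    print(-5.5 % 2);  // -1.5, same as fmod(-5.5, 2)
    print(5.5 % -2);  // 1.5,  same as fmod(5.5, -2)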

deps/v8/src/assembler.h (1)

@ -413,6 +413,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_allocation_limit_address();
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
Address address() const {return reinterpret_cast<Address>(address_);}

deps/v8/src/ast.h (13)

@ -1575,16 +1575,10 @@ class RegExpQuantifier: public RegExpTree {
};
enum CaptureAvailability {
CAPTURE_AVAILABLE,
CAPTURE_UNREACHABLE,
CAPTURE_PERMANENTLY_UNREACHABLE
};
class RegExpCapture: public RegExpTree {
public:
explicit RegExpCapture(RegExpTree* body, int index)
: body_(body), index_(index), available_(CAPTURE_AVAILABLE) { }
: body_(body), index_(index) { }
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@ -1600,16 +1594,11 @@ class RegExpCapture: public RegExpTree {
virtual int max_match() { return body_->max_match(); }
RegExpTree* body() { return body_; }
int index() { return index_; }
inline CaptureAvailability available() { return available_; }
inline void set_available(CaptureAvailability availability) {
available_ = availability;
}
static int StartRegister(int index) { return index * 2; }
static int EndRegister(int index) { return index * 2 + 1; }
private:
RegExpTree* body_;
int index_;
CaptureAvailability available_;
};

deps/v8/src/bootstrapper.cc (65)

@ -134,7 +134,7 @@ void Bootstrapper::TearDown() {
}
// Pending fixups are code positions that have refer to builtin code
// Pending fixups are code positions that refer to builtin code
// objects that were not available at the time the code was generated.
// The pending list is processed whenever an environment has been
// created.
@ -216,7 +216,6 @@ bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) {
*reinterpret_cast<Object**>(pc) = f->code();
}
} else {
ASSERT(is_pc_relative);
Assembler::set_target_address_at(pc, f->code()->instruction_start());
}
@ -539,7 +538,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
{ // --- G l o b a l ---
// Step 1: create a fresh inner JSGlobalObject
Handle<JSGlobalObject> object;
Handle<GlobalObject> object;
{
Handle<JSFunction> js_global_function;
Handle<ObjectTemplateInfo> js_global_template;
@ -579,9 +578,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
}
js_global_function->initial_map()->set_is_hidden_prototype();
SetExpectedNofProperties(js_global_function, 100);
object = Handle<JSGlobalObject>::cast(
Factory::NewJSObject(js_global_function, TENURED));
object = Factory::NewGlobalObject(js_global_function);
}
// Set the global context for the global object.
@ -963,12 +960,10 @@ bool Genesis::InstallNatives() {
Handle<String> name = Factory::LookupAsciiSymbol("builtins");
builtins_fun->shared()->set_instance_class_name(*name);
SetExpectedNofProperties(builtins_fun, 100);
// Allocate the builtins object.
Handle<JSBuiltinsObject> builtins =
Handle<JSBuiltinsObject>::cast(Factory::NewJSObject(builtins_fun,
TENURED));
Handle<JSBuiltinsObject>::cast(Factory::NewGlobalObject(builtins_fun));
builtins->set_builtins(*builtins);
builtins->set_global_context(*global_context());
builtins->set_global_receiver(*builtins);
@ -1113,8 +1108,8 @@ bool Genesis::InstallNatives() {
}
#ifdef V8_HOST_ARCH_64_BIT
// TODO(X64): Remove this test when code generation works and is stable.
CodeGenerator::TestCodeGenerator();
// TODO(X64): Remove this when inline caches work.
FLAG_use_ic = false;
#endif // V8_HOST_ARCH_64_BIT
@ -1191,10 +1186,6 @@ bool Genesis::InstallNatives() {
apply->shared()->set_length(2);
}
// Make sure that the builtins object has fast properties.
// If the ASSERT below fails, please increase the expected number of
// properties for the builtins object.
ASSERT(builtins->HasFastProperties());
#ifdef DEBUG
builtins->Verify();
#endif
@ -1214,6 +1205,15 @@ bool Genesis::InstallSpecialObjects() {
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
SetProperty(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
NONE);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
@ -1373,43 +1373,35 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(from->map()->instance_descriptors());
int offset = 0;
while (true) {
// Iterating through the descriptors is not gc safe so we have to
// store the value in a handle and create a new stream for each entry.
DescriptorReader stream(*descs, offset);
if (stream.eos()) break;
// We have to read out the next offset before we do anything that may
// cause a gc, since the DescriptorReader is not gc safe.
offset = stream.next_position();
PropertyDetails details = stream.GetDetails();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = PropertyDetails(descs->GetDetails(i));
switch (details.type()) {
case FIELD: {
HandleScope inner;
Handle<String> key = Handle<String>(stream.GetKey());
int index = stream.GetFieldIndex();
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
SetProperty(to, key, value, details.attributes());
break;
}
case CONSTANT_FUNCTION: {
HandleScope inner;
Handle<String> key = Handle<String>(stream.GetKey());
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(stream.GetConstantFunction());
Handle<JSFunction>(descs->GetConstantFunction(i));
SetProperty(to, key, fun, details.attributes());
break;
}
case CALLBACKS: {
LookupResult result;
to->LocalLookup(stream.GetKey(), &result);
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
if (result.IsValid()) continue;
HandleScope inner;
Handle<DescriptorArray> inst_descs =
Handle<DescriptorArray>(to->map()->instance_descriptors());
Handle<String> key = Handle<String>(stream.GetKey());
Handle<Object> entry = Handle<Object>(stream.GetCallbacksObject());
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i));
inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
key,
entry,
@ -1431,8 +1423,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
} else {
Handle<Dictionary> properties =
Handle<Dictionary>(from->property_dictionary());
Handle<StringDictionary> properties =
Handle<StringDictionary>(from->property_dictionary());
int capacity = properties->Capacity();
for (int i = 0; i < capacity; i++) {
Object* raw_key(properties->KeyAt(i));
@ -1445,6 +1437,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
Handle<Object> value = Handle<Object>(properties->ValueAt(i));
if (value->IsJSGlobalPropertyCell()) {
value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
}
PropertyDetails details = properties->DetailsAt(i);
SetProperty(to, key, value, details.attributes());
}
@ -1552,7 +1547,7 @@ Genesis::Genesis(Handle<Object> global_object,
// will always do unlinking.
previous_ = current_;
current_ = this;
result_ = NULL;
result_ = Handle<Context>::null();
// If V8 isn't running and cannot be initialized, just return.
if (!V8::IsRunning() && !V8::Initialize(NULL)) return;

19
deps/v8/src/code-stubs.cc

@ -37,8 +37,8 @@ namespace internal {
Handle<Code> CodeStub::GetCode() {
uint32_t key = GetKey();
int index = Heap::code_stubs()->FindNumberEntry(key);
if (index == -1) {
int index = Heap::code_stubs()->FindEntry(key);
if (index == NumberDictionary::kNotFound) {
HandleScope scope;
// Update the static counter each time a new code stub is generated.
@ -80,14 +80,15 @@ Handle<Code> CodeStub::GetCode() {
#endif
// Update the dictionary and the root in Heap.
Handle<Dictionary> dict =
Factory::DictionaryAtNumberPut(Handle<Dictionary>(Heap::code_stubs()),
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
key,
code);
Heap::set_code_stubs(*dict);
index = Heap::code_stubs()->FindNumberEntry(key);
Heap::public_set_code_stubs(*dict);
index = Heap::code_stubs()->FindEntry(key);
}
ASSERT(index != -1);
ASSERT(index != NumberDictionary::kNotFound);
return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
}
@ -133,6 +134,10 @@ const char* CodeStub::MajorName(CodeStub::Major major_key) {
return "InvokeBuiltin";
case JSExit:
return "JSExit";
case ConvertToDouble:
return "ConvertToDouble";
case WriteInt32ToHeapNumber:
return "WriteInt32ToHeapNumber";
default:
UNREACHABLE();
return NULL;

2
deps/v8/src/codegen.cc

@ -416,8 +416,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateIsSmi, "_IsSmi"},
{&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
{&CodeGenerator::GenerateIsArray, "_IsArray"},
{&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"},
{&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
{&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
{&CodeGenerator::GenerateClassOf, "_ClassOf"},
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},

32
deps/v8/src/codegen.h

@ -77,6 +77,8 @@ enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
#include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#else
#error Unsupported target architecture.
#endif
#include "register-allocator.h"
@ -249,6 +251,36 @@ class UnarySubStub : public CodeStub {
};
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
Major MajorKey() { return Compare; }
int MinorKey();
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch);
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
static_cast<int>(cc_),
strict_ ? "true" : "false");
}
#endif
};
class CEntryStub : public CodeStub {
public:
CEntryStub() { }

1
deps/v8/src/compilation-cache.cc

@ -281,6 +281,7 @@ void CompilationCacheScript::Put(Handle<String> source,
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(0);
// TODO(X64): -fstrict-aliasing causes a problem with table. Fix it.
CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
}

2
deps/v8/src/d8-debug.h

@ -41,7 +41,7 @@ void HandleDebugEvent(DebugEvent event,
Handle<Object> event_data,
Handle<Value> data);
// Start the remote debugger connecting to a V8 debugger agent on the specified
// Start the remove debugger connecting to a V8 debugger agent on the specified
// port.
void RunRemoteDebugger(int port);

6
deps/v8/src/d8-posix.cc

@ -370,7 +370,11 @@ static Handle<Value> GetStdout(int child_fd,
// whether it exited normally. In the common case this doesn't matter because
// we don't get here before the child has closed stdout and most programs don't
// do that before they exit.
#if defined(WNOWAIT) && !defined(ANDROID)
//
// We're disabling usage of waitid in Mac OS X because it doesn't work for us:
// a parent process hangs on waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
#define HAS_WAITID 1
#endif

231
deps/v8/src/date-delay.js

@ -28,7 +28,6 @@
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
// const $isNaN = GlobalIsNaN;
// const $isFinite = GlobalIsFinite;
// -------------------------------------------------------------------
@ -41,6 +40,11 @@
// changes to these properties.
const $Date = global.Date;
// Helper function to throw error.
function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
}
// ECMA 262 - 15.9.1.2
function Day(time) {
return FLOOR(time / msPerDay);
@ -232,7 +236,7 @@ function WeekDay(time) {
var local_time_offset = %DateLocalTimeOffset();
function LocalTime(time) {
if ($isNaN(time)) return time;
if (NUMBER_IS_NAN(time)) return time;
return time + local_time_offset + DaylightSavingsOffset(time);
}
@ -242,7 +246,7 @@ function LocalTimeNoCheck(time) {
function UTC(time) {
if ($isNaN(time)) return time;
if (NUMBER_IS_NAN(time)) return time;
var tmp = time - local_time_offset;
return tmp - DaylightSavingsOffset(tmp);
}
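The switch from $isNaN to NUMBER_IS_NAN in these hunks replaces a call to a global function with a macro expanded by macros.py (also updated in this commit). A sketch of the idea, assuming the macro relies on NaN being the only value not equal to itself; the exact expansion lives in the updated macros.py:

    // Hypothetical expansion of the macro as a plain function.
    function NUMBER_IS_NAN(number) {
      return !(number == number);
    }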
@ -423,30 +427,61 @@ function TimeClip(time) {
}
// The Date cache is used to limit the cost of parsing the same Date
// strings over and over again.
var Date_cache = {
// Cached time value.
time: $NaN,
// Cached year when interpreting the time as a local time. Only
// valid when the time matches cached time.
year: $NaN,
// String input for which the cached time is valid.
string: null
};
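A rough illustration of what the cache buys, with an invented date string: constructing two dates from the same string should hit the cached time value instead of re-running DateParse:

    // Hypothetical cache hit: the second construction reuses cache.time.
    var a = new Date('Dec 25 1995 GMT');  // miss: DateParse runs, cache filled
    var b = new Date('Dec 25 1995 GMT');  // hit: same string, no reparse
    // GetFullYearFrom below likewise reuses cache.year when the time matches.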
%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
if (%IsConstructCall()) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
return (new $Date()).toString();
}
// ECMA 262 - 15.9.3
var argc = %_ArgumentsLength();
var value;
if (argc == 0) {
%_SetValueOf(this, %DateCurrentTime());
return;
value = %DateCurrentTime();
} else if (argc == 1) {
if (IS_NUMBER(year)) {
value = TimeClip(year);
} else if (IS_STRING(year)) {
// Probe the Date cache. If we already have a time value for the
// given time, we re-use that instead of parsing the string again.
var cache = Date_cache;
if (cache.string === year) {
value = cache.time;
} else {
value = DateParse(year);
cache.time = value;
cache.year = YearFromTime(LocalTimeNoCheck(value));
cache.string = year;
}
if (argc == 1) {
} else {
// According to ECMA 262, no hint should be given for this
// conversion. However, ToPrimitive defaults to String Hint
// for Date objects which will lose precision when the Date
// conversion. However, ToPrimitive defaults to STRING_HINT for
// Date objects which will lose precision when the Date
// constructor is called with another Date object as its
// argument. We therefore use Number Hint for the conversion
// (which is the default for everything else than Date
// objects). This makes us behave like KJS and SpiderMonkey.
// argument. We therefore use NUMBER_HINT for the conversion,
// which is the default for everything else than Date objects.
// This makes us behave like KJS and SpiderMonkey.
var time = ToPrimitive(year, NUMBER_HINT);
if (IS_STRING(time)) {
%_SetValueOf(this, DateParse(time));
} else {
%_SetValueOf(this, TimeClip(ToNumber(time)));
}
return;
value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
}
} else {
year = ToNumber(year);
month = ToNumber(month);
date = argc > 2 ? ToNumber(date) : 1;
@ -454,120 +489,118 @@ function TimeClip(time) {
minutes = argc > 4 ? ToNumber(minutes) : 0;
seconds = argc > 5 ? ToNumber(seconds) : 0;
ms = argc > 6 ? ToNumber(ms) : 0;
year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
%_SetValueOf(this, TimeClip(UTC(MakeDate(day, time))));
} else {
// ECMA 262 - 15.9.2
return (new $Date()).toString();
value = TimeClip(UTC(MakeDate(day, time)));
}
%_SetValueOf(this, value);
});
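The NUMBER_HINT choice in the constructor above matters when a Date is passed to the Date constructor. A hedged sketch of the observable difference, with an invented time value:

    // Hypothetical copy: under NUMBER_HINT the millisecond part survives.
    var d1 = new Date(1234567890123);  // 123 ms past the second
    var d2 = new Date(d1);             // ToPrimitive(d1, NUMBER_HINT) == d1.valueOf()
    // d2.getTime() == 1234567890123; a string hint would round-trip through
    // d1.toString() and drop the milliseconds.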
// Helper functions.
function GetTimeFrom(aDate) {
if (IS_DATE(aDate)) return %_ValueOf(aDate);
throw new $TypeError('this is not a Date object.');
return DATE_VALUE(aDate);
}
function GetMillisecondsFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return msFromTime(LocalTimeNoCheck(t));
}
function GetUTCMillisecondsFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return msFromTime(t);
}
function GetSecondsFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return SecFromTime(LocalTimeNoCheck(t));
}
function GetUTCSecondsFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return SecFromTime(t);
}
function GetMinutesFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MinFromTime(LocalTimeNoCheck(t));
}
function GetUTCMinutesFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MinFromTime(t);
}
function GetHoursFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return HourFromTime(LocalTimeNoCheck(t));
}
function GetUTCHoursFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return HourFromTime(t);
}
function GetFullYearFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
// Ignore the DST offset for year computations.
return YearFromTime(t + local_time_offset);
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
return YearFromTime(LocalTimeNoCheck(t));
}
function GetUTCFullYearFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return YearFromTime(t);
}
function GetMonthFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MonthFromTime(LocalTimeNoCheck(t));
}
function GetUTCMonthFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MonthFromTime(t);
}
function GetDateFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return DateFromTime(LocalTimeNoCheck(t));
}
function GetUTCDateFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return DateFromTime(t);
}
@ -629,7 +662,7 @@ function DatePrintString(time) {
// -------------------------------------------------------------------
// Reused output buffer.
// Reused output buffer. Used when parsing date strings.
var parse_buffer = $Array(7);
// ECMA 262 - 15.9.4.2
@ -659,7 +692,7 @@ function DateUTC(year, month, date, hours, minutes, seconds, ms) {
minutes = argc > 4 ? ToNumber(minutes) : 0;
seconds = argc > 5 ? ToNumber(seconds) : 0;
ms = argc > 6 ? ToNumber(ms) : 0;
year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, month, date);
var time = MakeTime(hours, minutes, seconds, ms);
@ -676,24 +709,24 @@ function DateNow() {
// ECMA 262 - 15.9.5.2
function DateToString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
}
// ECMA 262 - 15.9.5.3
function DateToDateString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DateString(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.4
function DateToTimeString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var lt = LocalTimeNoCheck(t);
return TimeString(lt) + LocalTimezoneString(lt);
}
@ -707,16 +740,16 @@ function DateToLocaleString() {
// ECMA 262 - 15.9.5.6
function DateToLocaleDateString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return LongDateString(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.7
function DateToLocaleTimeString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var lt = LocalTimeNoCheck(t);
return TimeString(lt);
}
@ -724,13 +757,13 @@ function DateToLocaleTimeString() {
// ECMA 262 - 15.9.5.8
function DateValueOf() {
return GetTimeFrom(this);
return DATE_VALUE(this);
}
// ECMA 262 - 15.9.5.9
function DateGetTime() {
return GetTimeFrom(this);
return DATE_VALUE(this);
}
@ -772,16 +805,16 @@ function DateGetUTCDate() {
// ECMA 262 - 15.9.5.16
function DateGetDay() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return t;
var t = %_ValueOf(this);
if (NUMBER_IS_NAN(t)) return t;
return WeekDay(LocalTimeNoCheck(t));
}
// ECMA 262 - 15.9.5.17
function DateGetUTCDay() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return t;
var t = %_ValueOf(this);
if (NUMBER_IS_NAN(t)) return t;
return WeekDay(t);
}
@ -836,22 +869,22 @@ function DateGetUTCMilliseconds() {
// ECMA 262 - 15.9.5.26
function DateGetTimezoneOffset() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return t;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return (t - LocalTimeNoCheck(t)) / msPerMinute;
}
// ECMA 262 - 15.9.5.27
function DateSetTime(ms) {
if (!IS_DATE(this)) throw new $TypeError('this is not a Date object.');
if (!IS_DATE(this)) ThrowDateTypeError();
return %_SetValueOf(this, TimeClip(ToNumber(ms)));
}
// ECMA 262 - 15.9.5.28
function DateSetMilliseconds(ms) {
var t = LocalTime(GetTimeFrom(this));
var t = LocalTime(DATE_VALUE(this));
ms = ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
@ -860,7 +893,7 @@ function DateSetMilliseconds(ms) {
// ECMA 262 - 15.9.5.29
function DateSetUTCMilliseconds(ms) {
var t = GetTimeFrom(this);
var t = DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
@ -869,7 +902,7 @@ function DateSetUTCMilliseconds(ms) {
// ECMA 262 - 15.9.5.30
function DateSetSeconds(sec, ms) {
var t = LocalTime(GetTimeFrom(this));
var t = LocalTime(DATE_VALUE(this));
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
@ -879,7 +912,7 @@ function DateSetSeconds(sec, ms) {
// ECMA 262 - 15.9.5.31
function DateSetUTCSeconds(sec, ms) {
var t = GetTimeFrom(this);
var t = DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
@ -889,7 +922,7 @@ function DateSetUTCSeconds(sec, ms) {
// ECMA 262 - 15.9.5.33
function DateSetMinutes(min, sec, ms) {
var t = LocalTime(GetTimeFrom(this));
var t = LocalTime(DATE_VALUE(this));
min = ToNumber(min);
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
@ -901,7 +934,7 @@ function DateSetMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCMinutes(min, sec, ms) {
var t = GetTimeFrom(this);
var t = DATE_VALUE(this);
min = ToNumber(min);
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
@ -913,7 +946,7 @@ function DateSetUTCMinutes(min, sec, ms) {
// ECMA 262 - 15.9.5.35
function DateSetHours(hour, min, sec, ms) {
var t = LocalTime(GetTimeFrom(this));
var t = LocalTime(DATE_VALUE(this));
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min);
@ -926,7 +959,7 @@ function DateSetHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.34
function DateSetUTCHours(hour, min, sec, ms) {
var t = GetTimeFrom(this);
var t = DATE_VALUE(this);
hour = ToNumber(hour);
var argc = %_ArgumentsLength();
min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min);
@ -939,7 +972,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
// ECMA 262 - 15.9.5.36
function DateSetDate(date) {
var t = LocalTime(GetTimeFrom(this));
var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
@ -948,7 +981,7 @@ function DateSetDate(date) {
// ECMA 262 - 15.9.5.37
function DateSetUTCDate(date) {
var t = GetTimeFrom(this);
var t = DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
@ -957,7 +990,7 @@ function DateSetUTCDate(date) {
// ECMA 262 - 15.9.5.38
function DateSetMonth(month, date) {
var t = LocalTime(GetTimeFrom(this));
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
var day = MakeDay(YearFromTime(t), month, date);
@ -967,7 +1000,7 @@ function DateSetMonth(month, date) {
// ECMA 262 - 15.9.5.39
function DateSetUTCMonth(month, date) {
var t = GetTimeFrom(this);
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
var day = MakeDay(YearFromTime(t), month, date);
@ -977,8 +1010,8 @@ function DateSetUTCMonth(month, date) {
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
var t = GetTimeFrom(this);
t = $isNaN(t) ? 0 : LocalTimeNoCheck(t);
var t = DATE_VALUE(this);
t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
@ -990,8 +1023,8 @@ function DateSetFullYear(year, month, date) {
// ECMA 262 - 15.9.5.41
function DateSetUTCFullYear(year, month, date) {
var t = GetTimeFrom(this);
if ($isNaN(t)) t = 0;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
@ -1003,8 +1036,8 @@ function DateSetUTCFullYear(year, month, date) {
// ECMA 262 - 15.9.5.42
function DateToUTCString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
+ TwoDigitString(DateFromTime(t)) + ' '
@ -1016,18 +1049,18 @@ function DateToUTCString() {
// ECMA 262 - B.2.4
function DateGetYear() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return $NaN;
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return $NaN;
return YearFromTime(LocalTimeNoCheck(t)) - 1900;
}
// ECMA 262 - B.2.5
function DateSetYear(year) {
var t = LocalTime(GetTimeFrom(this));
if ($isNaN(t)) t = 0;
var t = LocalTime(DATE_VALUE(this));
if (NUMBER_IS_NAN(t)) t = 0;
year = ToNumber(year);
if ($isNaN(year)) return %_SetValueOf(this, $NaN);
if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));

66
deps/v8/src/debug-delay.js

@ -223,7 +223,8 @@ function IsBreakPointTriggered(break_id, break_point) {
// Object representing a script break point. The script is referenced by its
// script name or script id and the break point is represented as line and
// column.
function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column) {
function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
opt_groupId) {
this.type_ = type;
if (type == Debug.ScriptBreakPointType.ScriptId) {
this.script_id_ = script_id_or_name;
@ -232,6 +233,7 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column) {
}
this.line_ = opt_line || 0;
this.column_ = opt_column;
this.groupId_ = opt_groupId;
this.hit_count_ = 0;
this.active_ = true;
this.condition_ = null;
@ -244,6 +246,11 @@ ScriptBreakPoint.prototype.number = function() {
};
ScriptBreakPoint.prototype.groupId = function() {
return this.groupId_;
};
ScriptBreakPoint.prototype.type = function() {
return this.type_;
};
@ -611,10 +618,12 @@ Debug.findScriptBreakPoint = function(break_point_number, remove) {
// Sets a breakpoint in a script identified through id or name at the
// specified source line and column within that line.
Debug.setScriptBreakPoint = function(type, script_id_or_name,
opt_line, opt_column, opt_condition) {
opt_line, opt_column, opt_condition,
opt_groupId) {
// Create script break point object.
var script_break_point =
new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column);
new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
opt_groupId);
// Assign number to the new script break point and add it.
script_break_point.number_ = next_break_point_number++;
@ -636,19 +645,19 @@ Debug.setScriptBreakPoint = function(type, script_id_or_name,
Debug.setScriptBreakPointById = function(script_id,
opt_line, opt_column,
opt_condition) {
opt_condition, opt_groupId) {
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
script_id, opt_line, opt_column,
opt_condition)
opt_condition, opt_groupId);
}
Debug.setScriptBreakPointByName = function(script_name,
opt_line, opt_column,
opt_condition) {
opt_condition, opt_groupId) {
return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
script_name, opt_line, opt_column,
opt_condition)
opt_condition, opt_groupId);
}
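A hedged example of the extended signatures; the file name, line numbers, condition, and group id are made up:

    // Hypothetical: two script break points sharing a group id.
    Debug.setScriptBreakPointByName('app.js', 10, 0, undefined, 'ui-group');
    Debug.setScriptBreakPointByName('app.js', 25, 0, 'x > 5', 'ui-group');
    // Both can later be removed in one step via clearbreakpointgroup.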
@ -991,7 +1000,6 @@ CompileEvent.prototype.toJSONProtocol = function() {
}
o.body = {};
o.body.script = this.script_;
o.setOption('includeSource', true);
return o.toJSONProtocol();
}
@ -1211,6 +1219,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.changeBreakPointRequest_(request, response);
} else if (request.command == 'clearbreakpoint') {
this.clearBreakPointRequest_(request, response);
} else if (request.command == 'clearbreakpointgroup') {
this.clearBreakPointGroupRequest_(request, response);
} else if (request.command == 'backtrace') {
this.backtraceRequest_(request, response);
} else if (request.command == 'frame') {
@ -1326,6 +1336,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
true : request.arguments.enabled;
var condition = request.arguments.condition;
var ignoreCount = request.arguments.ignoreCount;
var groupId = request.arguments.groupId;
// Check for legal arguments.
if (!type || IS_UNDEFINED(target)) {
@ -1379,10 +1390,11 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
} else if (type == 'script') {
// set script break point.
break_point_number =
Debug.setScriptBreakPointByName(target, line, column, condition);
Debug.setScriptBreakPointByName(target, line, column, condition,
groupId);
} else { // type == 'scriptId'.
break_point_number =
Debug.setScriptBreakPointById(target, line, column, condition);
Debug.setScriptBreakPointById(target, line, column, condition, groupId);
}
// Set additional break point properties.
@ -1455,6 +1467,40 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, res
}
DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
// Check for legal request.
if (!request.arguments) {
response.failed('Missing arguments');
return;
}
// Pull out arguments.
var group_id = request.arguments.groupId;
// Check for legal arguments.
if (!group_id) {
response.failed('Missing argument "groupId"');
return;
}
var cleared_break_points = [];
var new_script_break_points = [];
for (var i = 0; i < script_break_points.length; i++) {
var next_break_point = script_break_points[i];
if (next_break_point.groupId() == group_id) {
cleared_break_points.push(next_break_point.number());
next_break_point.clear();
} else {
new_script_break_points.push(next_break_point);
}
}
script_break_points = new_script_break_points;
// Add the cleared break point numbers to the response.
response.body = { breakpoints: cleared_break_points };
}
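A sketch of the corresponding protocol exchange handled by the new request processor; sequence numbers and the group id are invented:

    // Hypothetical request routed to clearBreakPointGroupRequest_:
    var request = { seq: 117,
                    type: 'request',
                    command: 'clearbreakpointgroup',
                    arguments: { groupId: 'ui-group' } };
    // On success the response body lists the cleared break point numbers:
    // body: { breakpoints: [1, 2] }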
DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
// Check for legal request.
if (!request.arguments) {

22
deps/v8/src/debug.cc

@ -1260,6 +1260,7 @@ void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) {
// Handle stepping into a function.
void Debug::HandleStepIn(Handle<JSFunction> function,
Handle<Object> holder,
Address fp,
bool is_constructor) {
// If the frame pointer is not supplied by the caller find it.
@ -1285,21 +1286,12 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
Builtins::builtin(Builtins::FunctionCall)) {
// Handle function.apply and function.call separately to flood the
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. At the point of the call IC to call either
// Builtins::FunctionApply or Builtins::FunctionCall the expression
// stack has the following content:
// symbol "apply" or "call"
// function apply or call was called on
// receiver for apply or call (first parameter to apply or call)
// ... further arguments to apply or call.
JavaScriptFrameIterator it;
ASSERT(it.frame()->fp() == fp);
ASSERT(it.frame()->GetExpression(1)->IsJSFunction());
if (it.frame()->GetExpression(1)->IsJSFunction()) {
Handle<JSFunction>
actual_function(JSFunction::cast(it.frame()->GetExpression(1)));
Handle<SharedFunctionInfo> actual_shared(actual_function->shared());
Debug::FloodWithOneShot(actual_shared);
// Builtins::FunctionCall. The receiver of call/apply is the target
// function.
if (!holder.is_null() && holder->IsJSFunction()) {
Handle<SharedFunctionInfo> shared_info(
JSFunction::cast(*holder)->shared());
Debug::FloodWithOneShot(shared_info);
}
} else {
Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));

5
deps/v8/src/debug.h

@ -270,6 +270,7 @@ class Debug {
static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
static void HandleStepIn(Handle<JSFunction> function,
Handle<Object> holder,
Address fp,
bool is_constructor);
static Address step_in_fp() { return thread_local_.step_into_fp_; }
@ -363,6 +364,10 @@ class Debug {
static const int kIa32CallInstructionLength = 5;
static const int kIa32JSReturnSequenceLength = 6;
// The x64 JS return sequence is padded with int3 to make it large
// enough to hold a call instruction when the debugger patches it.
static const int kX64JSReturnSequenceLength = 13;
// Code generator routines.
static void GenerateLoadICDebugBreak(MacroAssembler* masm);
static void GenerateStoreICDebugBreak(MacroAssembler* masm);

7
deps/v8/src/disassembler.cc

@ -239,6 +239,13 @@ static int DecodeIt(FILE* f,
InlineCacheState ic_state = code->ic_state();
out.AddFormatted(" %s, %s", Code::Kind2String(kind),
Code::ICState2String(ic_state));
if (ic_state == MONOMORPHIC) {
PropertyType type = code->type();
out.AddFormatted(", %s", Code::PropertyType2String(type));
}
if (code->ic_in_loop() == IN_LOOP) {
out.AddFormatted(", in_loop");
}
if (kind == Code::CALL_IC) {
out.AddFormatted(", argc = %d", code->arguments_count());
}

2
deps/v8/src/execution.cc

@ -38,6 +38,8 @@
#include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#else
#error Unsupported target architecture.
#endif
#include "debug.h"

45
deps/v8/src/factory.cc

@ -49,9 +49,17 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size) {
}
Handle<Dictionary> Factory::NewDictionary(int at_least_space_for) {
Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(Dictionary::Allocate(at_least_space_for), Dictionary);
CALL_HEAP_FUNCTION(StringDictionary::Allocate(at_least_space_for),
StringDictionary);
}
Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(NumberDictionary::Allocate(at_least_space_for),
NumberDictionary);
}
@ -562,12 +570,10 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
int descriptor_count = 0;
// Copy the descriptors from the array.
DescriptorWriter w(*result);
for (DescriptorReader r(*array); !r.eos(); r.advance()) {
if (!r.IsNullDescriptor()) {
w.WriteFrom(&r);
for (int i = 0; i < array->number_of_descriptors(); i++) {
if (array->GetType(i) != NULL_DESCRIPTOR) {
result->CopyFrom(descriptor_count++, *array, i);
}
descriptor_count++;
}
// Number of duplicates detected.
@ -586,7 +592,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
if (result->LinearSearch(*key, descriptor_count) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
w.Write(&desc);
result->Set(descriptor_count, &desc);
descriptor_count++;
} else {
duplicates++;
@ -596,13 +602,11 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors(
// If duplicates were detected, allocate a result of the right size
// and transfer the elements.
if (duplicates > 0) {
int number_of_descriptors = result->number_of_descriptors() - duplicates;
Handle<DescriptorArray> new_result =
NewDescriptorArray(result->number_of_descriptors() - duplicates);
DescriptorWriter w(*new_result);
DescriptorReader r(*result);
while (!w.eos()) {
w.WriteFrom(&r);
r.advance();
NewDescriptorArray(number_of_descriptors);
for (int i = 0; i < number_of_descriptors; i++) {
new_result->CopyFrom(i, *result, i);
}
result = new_result;
}
@ -619,6 +623,14 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
Handle<GlobalObject> Factory::NewGlobalObject(
Handle<JSFunction> constructor) {
CALL_HEAP_FUNCTION(Heap::AllocateGlobalObject(*constructor),
GlobalObject);
}
Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, NOT_TENURED),
JSObject);
@ -647,10 +659,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) {
}
Handle<Dictionary> Factory::DictionaryAtNumberPut(Handle<Dictionary> dictionary,
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), Dictionary);
CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), NumberDictionary);
}

23
deps/v8/src/factory.h

@ -28,6 +28,7 @@
#ifndef V8_FACTORY_H_
#define V8_FACTORY_H_
#include "globals.h"
#include "heap.h"
#include "zone-inl.h"
@ -47,7 +48,9 @@ class Factory : public AllStatic {
// Allocate a new fixed array with non-existing entries (the hole).
static Handle<FixedArray> NewFixedArrayWithHoles(int size);
static Handle<Dictionary> NewDictionary(int at_least_space_for);
static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@ -183,6 +186,9 @@ class Factory : public AllStatic {
static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
// Global objects are pretenured.
static Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor);
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
@ -294,13 +300,19 @@ class Factory : public AllStatic {
Handle<JSObject> instance,
bool* pending_exception);
#define ROOT_ACCESSOR(type, name) \
static Handle<type> name() { return Handle<type>(&Heap::name##_); }
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline Handle<type> name() { \
return Handle<type>(bit_cast<type**, Object**>( \
&Heap::roots_[Heap::k##camel_name##RootIndex])); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR_ACCESSOR
#define SYMBOL_ACCESSOR(name, str) \
static Handle<String> name() { return Handle<String>(&Heap::name##_); }
static inline Handle<String> name() { \
return Handle<String>(bit_cast<String**, Object**>( \
&Heap::roots_[Heap::k##name##RootIndex])); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
@ -310,7 +322,8 @@ class Factory : public AllStatic {
static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
static Handle<Dictionary> DictionaryAtNumberPut(Handle<Dictionary>,
static Handle<NumberDictionary> DictionaryAtNumberPut(
Handle<NumberDictionary>,
uint32_t key,
Handle<Object> value);

3
deps/v8/src/flag-definitions.h

@ -110,6 +110,7 @@ DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_string(natives_file, NULL, "alternative natives file")
DEFINE_bool(expose_gc, false, "expose gc extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
// builtins-ia32.cc
DEFINE_bool(inline_new, true, "use fast inline allocation")
@ -207,8 +208,6 @@ DEFINE_bool(preemption, false,
// Regexp
DEFINE_bool(trace_regexps, false, "trace regexp execution")
DEFINE_bool(regexp_native, true,
"use native code regexp implementation (IA32 only)")
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
// Testing flags test/cctest/test-{flags,api,serialization}.cc

2
deps/v8/src/frames-inl.h

@ -36,6 +36,8 @@
#include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#else
#error Unsupported target architecture.
#endif
namespace v8 {

17
deps/v8/src/globals.h

@ -198,7 +198,8 @@ class FixedArray;
class FunctionEntry;
class FunctionLiteral;
class FunctionTemplateInfo;
class Dictionary;
class NumberDictionary;
class StringDictionary;
class FreeStoreAllocationPolicy;
template <typename T> class Handle;
class Heap;
@ -257,13 +258,15 @@ typedef bool (*WeakSlotCallback)(Object** pointer);
// consecutive.
enum AllocationSpace {
NEW_SPACE, // Semispaces collected with copying collector.
OLD_POINTER_SPACE, // Must be first of the paged spaces - see PagedSpaces.
OLD_DATA_SPACE, // May not have pointers to new space.
CODE_SPACE, // Also one of the old spaces. Marked executable.
MAP_SPACE, // Only map objects.
LO_SPACE, // Large objects.
OLD_POINTER_SPACE, // May contain pointers to new space.
OLD_DATA_SPACE, // Must not have pointers to new space.
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
CELL_SPACE, // Only and all cell objects.
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE // <= 5 (see kSpaceBits and kLOSpacePointer)
LAST_SPACE = LO_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;

28
deps/v8/src/handles.cc

@ -289,10 +289,11 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// hidden symbols hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = obj->map()->instance_descriptors();
DescriptorReader r(descriptors, 0); // Explicitly position reader at zero.
if (!r.eos() && (r.GetKey() == *key) && r.IsProperty()) {
ASSERT(r.type() == FIELD);
return Handle<Object>(obj->FastPropertyAt(r.GetFieldIndex()));
if ((descriptors->number_of_descriptors() > 0) &&
(descriptors->GetKey(0) == *key) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
}
}
@ -372,10 +373,10 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
Handle<Object> cache(reinterpret_cast<Object**>(script->wrapper()->proxy()));
if (!cache.is_null()) {
if (script->wrapper()->proxy() != NULL) {
// Return the script wrapper directly from the cache.
return Handle<JSValue>(JSValue::cast(*cache));
return Handle<JSValue>(
reinterpret_cast<JSValue**>(script->wrapper()->proxy()));
}
// Construct a new script wrapper.
@ -588,12 +589,13 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
int num_enum = object->NumberOfEnumProperties();
Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
for (DescriptorReader r(object->map()->instance_descriptors());
!r.eos();
r.advance()) {
if (r.IsProperty() && !r.IsDontEnum()) {
(*storage)->set(index, r.GetKey());
(*sort_array)->set(index, Smi::FromInt(r.GetDetails().index()));
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(object->map()->instance_descriptors());
for (int i = 0; i < descs->number_of_descriptors(); i++) {
if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
(*storage)->set(index, descs->GetKey(i));
PropertyDetails details(descs->GetDetails(i));
(*sort_array)->set(index, Smi::FromInt(details.index()));
index++;
}
}

19
deps/v8/src/heap-inl.h

@ -82,6 +82,8 @@ Object* Heap::AllocateRaw(int size_in_bytes,
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
ASSERT(MAP_SPACE == space);
result = map_space_->AllocateRaw(size_in_bytes);
@ -107,12 +109,23 @@ Object* Heap::NumberFromUint32(uint32_t value) {
}
Object* Heap::AllocateRawMap(int size_in_bytes) {
Object* Heap::AllocateRawMap() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
Object* result = map_space_->AllocateRaw(size_in_bytes);
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
Object* Heap::AllocateRawCell() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
@ -216,7 +229,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
void Heap::SetLastScriptId(Object* last_script_id) {
last_script_id_ = last_script_id;
roots_[kLastScriptIdRootIndex] = last_script_id;
}

488
deps/v8/src/heap.cc

@ -43,27 +43,17 @@
namespace v8 {
namespace internal {
#define ROOT_ALLOCATION(type, name) type* Heap::name##_;
ROOT_LIST(ROOT_ALLOCATION)
#undef ROOT_ALLOCATION
#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_;
STRUCT_LIST(STRUCT_ALLOCATION)
#undef STRUCT_ALLOCATION
#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_;
SYMBOL_LIST(SYMBOL_ALLOCATION)
#undef SYMBOL_ALLOCATION
String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
static const int kMinimumPromotionLimit = 2*MB;
@ -79,7 +69,7 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if V8_HOST_ARCH_ARM
#if V8_TARGET_ARCH_ARM
int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
@ -121,7 +111,8 @@ int Heap::Capacity() {
old_pointer_space_->Capacity() +
old_data_space_->Capacity() +
code_space_->Capacity() +
map_space_->Capacity();
map_space_->Capacity() +
cell_space_->Capacity();
}
@ -132,7 +123,8 @@ int Heap::Available() {
old_pointer_space_->Available() +
old_data_space_->Available() +
code_space_->Available() +
map_space_->Available();
map_space_->Available() +
cell_space_->Available();
}
@ -141,6 +133,7 @@ bool Heap::HasBeenSetup() {
old_data_space_ != NULL &&
code_space_ != NULL &&
map_space_ != NULL &&
cell_space_ != NULL &&
lo_space_ != NULL;
}
@ -221,6 +214,7 @@ void Heap::ReportStatisticsAfterGC() {
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
if (FLAG_heap_stats) {
new_space_.CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
new_space_.ReportStatistics();
@ -283,9 +277,8 @@ void Heap::GarbageCollectionEpilogue() {
Counters::alive_after_last_gc.Set(SizeOfObjects());
SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_);
Counters::symbol_table_capacity.Set(symbol_table->Capacity());
Counters::number_of_symbols.Set(symbol_table->NumberOfElements());
Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsAfterGC();
#endif
@ -370,6 +363,8 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
return code_space_->Available() >= requested_size;
case MAP_SPACE:
return map_space_->Available() >= requested_size;
case CELL_SPACE:
return cell_space_->Available() >= requested_size;
case LO_SPACE:
return lo_space_->Available() >= requested_size;
}
@ -404,8 +399,7 @@ class SymbolTableVerifier : public ObjectVisitor {
static void VerifySymbolTable() {
#ifdef DEBUG
SymbolTableVerifier verifier;
SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
symbol_table->IterateElements(&verifier);
Heap::symbol_table()->IterateElements(&verifier);
#endif // DEBUG
}
@ -428,22 +422,8 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
// If we have used the mark-compact collector to collect the new
// space, and it has not compacted the new space, we force a
// separate scavenge collection. This is a hack. It covers the
// case where (1) a new space collection was requested, (2) the
// collector selection policy selected the mark-compact collector,
// and (3) the mark-compact collector policy selected not to
// compact the new space. In that case, there is no more (usable)
// free space in the new space after the collection compared to
// before.
if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) {
Scavenge();
}
} else {
Scavenge();
}
Counters::objs_since_last_young.Set(0);
PostGarbageCollectionProcessing();
@ -621,6 +601,7 @@ static void VerifyNonPointerSpacePointers() {
}
#endif
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@ -679,7 +660,7 @@ void Heap::Scavenge() {
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
#if V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_64_BIT
// TODO(X64): Make this go away again. We currently disable RSets for
// 64-bit-mode.
HeapObjectIterator old_pointer_iterator(old_pointer_space_);
@ -699,13 +680,25 @@ void Heap::Scavenge() {
heap_object->Iterate(&scavenge_visitor);
}
}
#else // V8_HOST_ARCH_64_BIT
#else // !defined(V8_HOST_ARCH_64_BIT)
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
#endif // V8_HOST_ARCH_64_BIT
#endif
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
while (cell_iterator.has_next()) {
HeapObject* cell = cell_iterator.next();
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
(JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
}
do {
ASSERT(new_space_front <= new_space_.top());
@ -845,8 +838,8 @@ int Heap::UpdateRSet(HeapObject* obj) {
void Heap::RebuildRSets() {
// By definition, we do not care about remembered set bits in code or data
// spaces.
// By definition, we do not care about remembered set bits in code,
// data, or cell spaces.
map_space_->ClearRSet();
RebuildRSets(map_space_);
@ -1021,11 +1014,11 @@ void Heap::ScavengePointer(HeapObject** p) {
Object* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = AllocateRawMap(Map::kSize);
Object* result = AllocateRawMap();
if (result->IsFailure()) return result;
// Map::cast cannot be used due to uninitialized map field.
reinterpret_cast<Map*>(result)->set_map(meta_map());
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
@ -1035,7 +1028,7 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
Object* result = AllocateRawMap(Map::kSize);
Object* result = AllocateRawMap();
if (result->IsFailure()) return result;
Map* map = reinterpret_cast<Map*>(result);
@ -1054,36 +1047,59 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
}
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
{contents, k##name##RootIndex},
SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};
const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
{ NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};
bool Heap::CreateInitialMaps() {
Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (obj->IsFailure()) return false;
// Map::cast cannot be used due to uninitialized map field.
meta_map_ = reinterpret_cast<Map*>(obj);
meta_map()->set_map(meta_map());
Map* new_meta_map = reinterpret_cast<Map*>(obj);
set_meta_map(new_meta_map);
new_meta_map->set_map(new_meta_map);
obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
if (obj->IsFailure()) return false;
fixed_array_map_ = Map::cast(obj);
set_fixed_array_map(Map::cast(obj));
obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
if (obj->IsFailure()) return false;
oddball_map_ = Map::cast(obj);
set_oddball_map(Map::cast(obj));
// Allocate the empty array
obj = AllocateEmptyFixedArray();
if (obj->IsFailure()) return false;
empty_fixed_array_ = FixedArray::cast(obj);
set_empty_fixed_array(FixedArray::cast(obj));
obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (obj->IsFailure()) return false;
null_value_ = obj;
set_null_value(obj);
// Allocate the empty descriptor array. AllocateMap can now be used.
// Allocate the empty descriptor array.
obj = AllocateEmptyFixedArray();
if (obj->IsFailure()) return false;
// There is a check against empty_descriptor_array() in cast().
empty_descriptor_array_ = reinterpret_cast<DescriptorArray*>(obj);
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
meta_map()->set_instance_descriptors(empty_descriptor_array());
@ -1101,100 +1117,106 @@ bool Heap::CreateInitialMaps() {
fixed_array_map()->set_prototype(null_value());
fixed_array_map()->set_constructor(null_value());
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
if (obj->IsFailure()) return false;
heap_number_map_ = Map::cast(obj);
set_heap_number_map(Map::cast(obj));
obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
if (obj->IsFailure()) return false;
proxy_map_ = Map::cast(obj);
set_proxy_map(Map::cast(obj));
#define ALLOCATE_STRING_MAP(type, size, name) \
obj = AllocateMap(type, size); \
if (obj->IsFailure()) return false; \
name##_map_ = Map::cast(obj);
STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP
for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
obj = AllocateMap(entry.type, entry.size);
if (obj->IsFailure()) return false;
roots_[entry.index] = Map::cast(obj);
}
obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_short_string_map_ = Map::cast(obj);
undetectable_short_string_map_->set_is_undetectable();
set_undetectable_short_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_medium_string_map_ = Map::cast(obj);
undetectable_medium_string_map_->set_is_undetectable();
set_undetectable_medium_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_long_string_map_ = Map::cast(obj);
undetectable_long_string_map_->set_is_undetectable();
set_undetectable_long_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_short_ascii_string_map_ = Map::cast(obj);
undetectable_short_ascii_string_map_->set_is_undetectable();
set_undetectable_short_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_medium_ascii_string_map_ = Map::cast(obj);
undetectable_medium_ascii_string_map_->set_is_undetectable();
set_undetectable_medium_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
if (obj->IsFailure()) return false;
undetectable_long_ascii_string_map_ = Map::cast(obj);
undetectable_long_ascii_string_map_->set_is_undetectable();
set_undetectable_long_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();
obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
if (obj->IsFailure()) return false;
byte_array_map_ = Map::cast(obj);
set_byte_array_map(Map::cast(obj));
obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
if (obj->IsFailure()) return false;
code_map_ = Map::cast(obj);
set_code_map(Map::cast(obj));
obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
JSGlobalPropertyCell::kSize);
if (obj->IsFailure()) return false;
set_global_property_cell_map(Map::cast(obj));
obj = AllocateMap(FILLER_TYPE, kPointerSize);
if (obj->IsFailure()) return false;
one_word_filler_map_ = Map::cast(obj);
set_one_pointer_filler_map(Map::cast(obj));
obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
if (obj->IsFailure()) return false;
two_word_filler_map_ = Map::cast(obj);
set_two_pointer_filler_map(Map::cast(obj));
#define ALLOCATE_STRUCT_MAP(NAME, Name, name) \
obj = AllocateMap(NAME##_TYPE, Name::kSize); \
if (obj->IsFailure()) return false; \
name##_map_ = Map::cast(obj);
STRUCT_LIST(ALLOCATE_STRUCT_MAP)
#undef ALLOCATE_STRUCT_MAP
for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
const StructTable& entry = struct_table[i];
obj = AllocateMap(entry.type, entry.size);
if (obj->IsFailure()) return false;
roots_[entry.index] = Map::cast(obj);
}
obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
if (obj->IsFailure()) return false;
hash_table_map_ = Map::cast(obj);
set_hash_table_map(Map::cast(obj));
obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
if (obj->IsFailure()) return false;
context_map_ = Map::cast(obj);
set_context_map(Map::cast(obj));
obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
if (obj->IsFailure()) return false;
catch_context_map_ = Map::cast(obj);
set_catch_context_map(Map::cast(obj));
obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
if (obj->IsFailure()) return false;
global_context_map_ = Map::cast(obj);
set_global_context_map(Map::cast(obj));
obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize);
if (obj->IsFailure()) return false;
boilerplate_function_map_ = Map::cast(obj);
set_boilerplate_function_map(Map::cast(obj));
obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize);
if (obj->IsFailure()) return false;
shared_function_info_map_ = Map::cast(obj);
set_shared_function_info_map(Map::cast(obj));
ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
return true;
@ -1230,6 +1252,15 @@ Object* Heap::AllocateHeapNumber(double value) {
}
Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
Object* result = AllocateRawCell();
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(global_property_cell_map());
JSGlobalPropertyCell::cast(result)->set_value(value);
return result;
}
Object* Heap::CreateOddball(Map* map,
const char* to_string,
Object* to_number) {
@ -1244,41 +1275,62 @@ bool Heap::CreateApiObjects() {
obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (obj->IsFailure()) return false;
neander_map_ = Map::cast(obj);
set_neander_map(Map::cast(obj));
obj = Heap::AllocateJSObjectFromMap(neander_map_);
obj = Heap::AllocateJSObjectFromMap(neander_map());
if (obj->IsFailure()) return false;
Object* elements = AllocateFixedArray(2);
if (elements->IsFailure()) return false;
FixedArray::cast(elements)->set(0, Smi::FromInt(0));
JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
message_listeners_ = JSObject::cast(obj);
set_message_listeners(JSObject::cast(obj));
return true;
}
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
// for cooking and uncooking (check out frames.cc).
// This eliminates the need for a dictionary lookup in the
// stub cache for these stubs.
HandleScope scope;
{
void Heap::CreateCEntryStub() {
CEntryStub stub;
c_entry_code_ = *stub.GetCode();
set_c_entry_code(*stub.GetCode());
}
{
void Heap::CreateCEntryDebugBreakStub() {
CEntryDebugBreakStub stub;
c_entry_debug_break_code_ = *stub.GetCode();
set_c_entry_debug_break_code(*stub.GetCode());
}
{
void Heap::CreateJSEntryStub() {
JSEntryStub stub;
js_entry_code_ = *stub.GetCode();
set_js_entry_code(*stub.GetCode());
}
{
void Heap::CreateJSConstructEntryStub() {
JSConstructEntryStub stub;
js_construct_entry_code_ = *stub.GetCode();
set_js_construct_entry_code(*stub.GetCode());
}
void Heap::CreateFixedStubs() {
// Here we create roots for fixed stubs. They are needed at GC
// for cooking and uncooking (check out frames.cc).
// This eliminates the need for a dictionary lookup in the
// stub cache for these stubs.
HandleScope scope;
// gcc-4.4 has a problem generating correct code for the following snippet:
// { CEntryStub stub;
// c_entry_code_ = *stub.GetCode();
// }
// { CEntryDebugBreakStub stub;
// c_entry_debug_break_code_ = *stub.GetCode();
// }
// To work around the problem, make separate functions that are not inlined.
Heap::CreateCEntryStub();
Heap::CreateCEntryDebugBreakStub();
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
}
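The workaround above boils down to "defeat the inliner". A minimal standalone sketch of the same pattern, with illustrative StubA/StubB types (not V8's) and GCC's noinline attribute to match the gcc-4.4 context:

#include <cstdio>

struct StubA { int GetCode() { return 1; } };
struct StubB { int GetCode() { return 2; } };

static int a_code, b_code;

// Hoisting each initializer into its own non-inlined function keeps the
// two stack-allocated stubs out of a single function body, so the buggy
// inliner never sees the combined snippet. (GCC attribute syntax.)
__attribute__((noinline)) static void CreateA() { StubA stub; a_code = stub.GetCode(); }
__attribute__((noinline)) static void CreateB() { StubB stub; b_code = stub.GetCode(); }

int main() {
  CreateA();
  CreateB();
  std::printf("%d %d\n", a_code, b_code);
}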
@ -1288,34 +1340,35 @@ bool Heap::CreateInitialObjects() {
// The -0 value must be set before NumberFromDouble works.
obj = AllocateHeapNumber(-0.0, TENURED);
if (obj->IsFailure()) return false;
minus_zero_value_ = obj;
ASSERT(signbit(minus_zero_value_->Number()) != 0);
set_minus_zero_value(obj);
ASSERT(signbit(minus_zero_value()->Number()) != 0);
obj = AllocateHeapNumber(OS::nan_value(), TENURED);
if (obj->IsFailure()) return false;
nan_value_ = obj;
set_nan_value(obj);
obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (obj->IsFailure()) return false;
undefined_value_ = obj;
set_undefined_value(obj);
ASSERT(!InNewSpace(undefined_value()));
// Allocate initial symbol table.
obj = SymbolTable::Allocate(kInitialSymbolTableSize);
if (obj->IsFailure()) return false;
symbol_table_ = obj;
// Don't use set_symbol_table() due to asserts.
roots_[kSymbolTableRootIndex] = obj;
// Assign the print strings for oddballs after creating the symbol table.
Object* symbol = LookupAsciiSymbol("undefined");
if (symbol->IsFailure()) return false;
Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol));
Oddball::cast(undefined_value_)->set_to_number(nan_value_);
Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
Oddball::cast(undefined_value())->set_to_number(nan_value());
// Assign the print strings for oddballs after creating the symbol table.
symbol = LookupAsciiSymbol("null");
if (symbol->IsFailure()) return false;
Oddball::cast(null_value_)->set_to_string(String::cast(symbol));
Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0));
Oddball::cast(null_value())->set_to_string(String::cast(symbol));
Oddball::cast(null_value())->set_to_number(Smi::FromInt(0));
// Allocate the null_value
obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
@ -1323,32 +1376,31 @@ bool Heap::CreateInitialObjects() {
obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
if (obj->IsFailure()) return false;
true_value_ = obj;
set_true_value(obj);
obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
if (obj->IsFailure()) return false;
false_value_ = obj;
set_false_value(obj);
obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
if (obj->IsFailure()) return false;
the_hole_value_ = obj;
set_the_hole_value(obj);
// Allocate the empty string.
obj = AllocateRawAsciiString(0, TENURED);
if (obj->IsFailure()) return false;
empty_string_ = String::cast(obj);
set_empty_string(String::cast(obj));
#define SYMBOL_INITIALIZE(name, string) \
obj = LookupAsciiSymbol(string); \
if (obj->IsFailure()) return false; \
(name##_) = String::cast(obj);
SYMBOL_LIST(SYMBOL_INITIALIZE)
#undef SYMBOL_INITIALIZE
for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
obj = LookupAsciiSymbol(constant_symbol_table[i].contents);
if (obj->IsFailure()) return false;
roots_[constant_symbol_table[i].index] = String::cast(obj);
}
// Allocate the hidden symbol which is used to identify the hidden properties
// in JSObjects. The hash code has a special value so that it will not match
// the empty string when searching for the property. It cannot be part of the
// SYMBOL_LIST because it needs to be allocated manually with the special
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
@ -1358,37 +1410,37 @@ bool Heap::CreateInitialObjects() {
// Allocate the proxy for __proto__.
obj = AllocateProxy((Address) &Accessors::ObjectPrototype);
if (obj->IsFailure()) return false;
prototype_accessors_ = Proxy::cast(obj);
set_prototype_accessors(Proxy::cast(obj));
// Allocate the code_stubs dictionary.
obj = Dictionary::Allocate(4);
obj = NumberDictionary::Allocate(4);
if (obj->IsFailure()) return false;
code_stubs_ = Dictionary::cast(obj);
set_code_stubs(NumberDictionary::cast(obj));
// Allocate the non_monomorphic_cache used in stub-cache.cc
obj = Dictionary::Allocate(4);
obj = NumberDictionary::Allocate(4);
if (obj->IsFailure()) return false;
non_monomorphic_cache_ = Dictionary::cast(obj);
set_non_monomorphic_cache(NumberDictionary::cast(obj));
CreateFixedStubs();
// Allocate the number->string conversion cache
obj = AllocateFixedArray(kNumberStringCacheSize * 2);
if (obj->IsFailure()) return false;
number_string_cache_ = FixedArray::cast(obj);
set_number_string_cache(FixedArray::cast(obj));
// Allocate cache for single character strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
if (obj->IsFailure()) return false;
single_character_string_cache_ = FixedArray::cast(obj);
set_single_character_string_cache(FixedArray::cast(obj));
// Allocate cache for external strings pointing to native source code.
obj = AllocateFixedArray(Natives::GetBuiltinsCount());
if (obj->IsFailure()) return false;
natives_source_cache_ = FixedArray::cast(obj);
set_natives_source_cache(FixedArray::cast(obj));
// Handling of script id generation is in Factory::NewScript.
last_script_id_ = undefined_value();
set_last_script_id(undefined_value());
// Initialize keyed lookup cache.
KeyedLookupCache::Clear();
@ -1426,13 +1478,13 @@ Object* Heap::GetNumberStringCache(Object* number) {
} else {
hash = double_get_hash(number->Number());
}
Object* key = number_string_cache_->get(hash * 2);
Object* key = number_string_cache()->get(hash * 2);
if (key == number) {
return String::cast(number_string_cache_->get(hash * 2 + 1));
return String::cast(number_string_cache()->get(hash * 2 + 1));
} else if (key->IsHeapNumber() &&
number->IsHeapNumber() &&
key->Number() == number->Number()) {
return String::cast(number_string_cache_->get(hash * 2 + 1));
return String::cast(number_string_cache()->get(hash * 2 + 1));
}
return undefined_value();
}
@ -1442,12 +1494,12 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
int hash;
if (number->IsSmi()) {
hash = smi_get_hash(Smi::cast(number));
number_string_cache_->set(hash * 2, number, SKIP_WRITE_BARRIER);
number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
} else {
hash = double_get_hash(number->Number());
number_string_cache_->set(hash * 2, number);
number_string_cache()->set(hash * 2, number);
}
number_string_cache_->set(hash * 2 + 1, string);
number_string_cache()->set(hash * 2 + 1, string);
}
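GetNumberStringCache and SetNumberStringCache above implement a direct-mapped cache with a key slot and a value slot per bucket; V8 flattens the pairs into one FixedArray at indices 2*hash and 2*hash + 1. A hedged plain-C++ sketch of the same layout, with a toy hash and illustrative types:

#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include <vector>

class NumberStringCache {
 public:
  explicit NumberStringCache(size_t size) : entries_(size) {}

  std::optional<std::string> Get(double number) const {
    const Entry& e = entries_[Hash(number)];
    if (e.used && e.key == number) return e.value;
    return std::nullopt;  // miss, like returning undefined_value() above
  }

  void Set(double number, std::string s) {
    // Collisions simply overwrite the bucket, as in the heap code above.
    entries_[Hash(number)] = Entry{true, number, std::move(s)};
  }

 private:
  struct Entry { bool used = false; double key = 0; std::string value; };

  size_t Hash(double d) const {  // toy stand-in for double_get_hash()
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return static_cast<size_t>(bits ^ (bits >> 32)) % entries_.size();
  }

  std::vector<Entry> entries_;
};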
@ -1460,19 +1512,19 @@ Object* Heap::SmiOrNumberFromDouble(double value,
static const DoubleRepresentation plus_zero(0.0);
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation nan(OS::nan_value());
ASSERT(minus_zero_value_ != NULL);
ASSERT(minus_zero_value() != NULL);
ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits));
DoubleRepresentation rep(value);
if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon
if (rep.bits == minus_zero.bits) {
return new_object ? AllocateHeapNumber(-0.0, pretenure)
: minus_zero_value_;
: minus_zero_value();
}
if (rep.bits == nan.bits) {
return new_object
? AllocateHeapNumber(OS::nan_value(), pretenure)
: nan_value_;
: nan_value();
}
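The bit comparison above is needed because operator== on doubles cannot distinguish -0.0 from 0.0 and never matches NaN. A small standalone illustration of the DoubleRepresentation trick:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

static uint64_t Bits(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof b);  // reinterpret the IEEE-754 bit pattern
  return b;
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  assert(-0.0 == 0.0);              // operator== cannot tell them apart
  assert(Bits(-0.0) != Bits(0.0));  // the bit patterns can
  assert(nan != nan);               // NaN never compares equal to itself...
  assert(Bits(nan) == Bits(nan));   // ...but its bits are stable
}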
// Try to represent the value as a tagged small integer.
@ -1514,7 +1566,7 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Object* Heap::AllocateSharedFunctionInfo(Object* name) {
Object* result = Allocate(shared_function_info_map(), NEW_SPACE);
Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
@ -1778,7 +1830,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map(Heap::one_word_filler_map());
filler->set_map(Heap::one_pointer_filler_map());
} else {
filler->set_map(Heap::byte_array_map());
ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
@ -2006,7 +2058,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties = AllocateFixedArray(prop_size);
Object* properties = AllocateFixedArray(prop_size, pretenure);
if (properties->IsFailure()) return properties;
// Allocate the JSObject.
@ -2034,7 +2086,39 @@ Object* Heap::AllocateJSObject(JSFunction* constructor,
Map::cast(initial_map)->set_constructor(constructor);
}
// Allocate the object based on the constructor's initial map.
return AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
Object* result =
AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
// Make sure result is NOT a global object if valid.
ASSERT(result->IsFailure() || !result->IsGlobalObject());
return result;
}
Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
// require us to change property values to JSGlobalPropertyCells.
ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0);
// Make sure we don't have a ton of pre-allocated slots in the
// global objects. They will be unused once we normalize the object.
ASSERT(constructor->initial_map()->unused_property_fields() == 0);
ASSERT(constructor->initial_map()->inobject_properties() == 0);
// Allocate the object based on the constructor's initial map.
Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED);
if (result->IsFailure()) return result;
// Normalize the result.
JSObject* global = JSObject::cast(result);
result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
if (result->IsFailure()) return result;
// Make sure result is a global object with properties in dictionary.
ASSERT(global->IsGlobalObject());
ASSERT(!global->HasFastProperties());
return global;
}
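A sketch of why the immediate normalization above matters: once every global property value sits behind its own cell, code can cache the cell pointer and still observe later stores without re-looking-up the property. The types below are illustrative stand-ins, not V8's:

#include <cassert>
#include <map>
#include <memory>
#include <string>

struct Cell { int value = 0; };  // stand-in for JSGlobalPropertyCell

// Globals keep properties in a dictionary of name -> cell, never in
// fast fixed-offset fields, so a cached Cell* stays valid across updates.
struct DemoGlobalObject {
  std::map<std::string, std::unique_ptr<Cell>> dict;

  Cell* EnsurePropertyCell(const std::string& name) {
    auto& slot = dict[name];
    if (!slot) slot = std::make_unique<Cell>();
    return slot.get();
  }
};

int main() {
  DemoGlobalObject global;
  Cell* cached = global.EnsurePropertyCell("x");  // what an IC would cache
  global.EnsurePropertyCell("x")->value = 42;     // later store to x
  assert(cached->value == 42);                    // the cached cell sees it
}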
@ -2111,7 +2195,7 @@ Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties = AllocateFixedArray(prop_size);
Object* properties = AllocateFixedArray(prop_size, TENURED);
if (properties->IsFailure()) return properties;
// Reset the map for the object.
@ -2500,7 +2584,7 @@ Object* Heap::AllocateHashTable(int length) {
Object* result = Heap::AllocateFixedArray(length);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(hash_table_map());
ASSERT(result->IsDictionary());
ASSERT(result->IsHashTable());
return result;
}
@ -2622,6 +2706,8 @@ void Heap::ReportHeapStatistics(const char* title) {
code_space_->ReportStatistics();
PrintF("Map space : ");
map_space_->ReportStatistics();
PrintF("Cell space : ");
cell_space_->ReportStatistics();
PrintF("Large object space : ");
lo_space_->ReportStatistics();
PrintF(">>>>>> ========================================= >>>>>>\n");
@ -2642,6 +2728,7 @@ bool Heap::Contains(Address addr) {
old_data_space_->Contains(addr) ||
code_space_->Contains(addr) ||
map_space_->Contains(addr) ||
cell_space_->Contains(addr) ||
lo_space_->SlowContains(addr));
}
@ -2666,6 +2753,8 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
return code_space_->Contains(addr);
case MAP_SPACE:
return map_space_->Contains(addr);
case CELL_SPACE:
return cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
}
@ -2679,22 +2768,31 @@ void Heap::Verify() {
ASSERT(HasBeenSetup());
VerifyPointersVisitor visitor;
Heap::IterateRoots(&visitor);
IterateRoots(&visitor);
AllSpaces spaces;
while (Space* space = spaces.next()) {
space->Verify();
}
new_space_.Verify();
VerifyPointersAndRSetVisitor rset_visitor;
old_pointer_space_->Verify(&rset_visitor);
map_space_->Verify(&rset_visitor);
VerifyPointersVisitor no_rset_visitor;
old_data_space_->Verify(&no_rset_visitor);
code_space_->Verify(&no_rset_visitor);
cell_space_->Verify(&no_rset_visitor);
lo_space_->Verify();
}
#endif // DEBUG
Object* Heap::LookupSymbol(Vector<const char> string) {
Object* symbol = NULL;
Object* new_table =
SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol);
Object* new_table = symbol_table()->LookupSymbol(string, &symbol);
if (new_table->IsFailure()) return new_table;
symbol_table_ = new_table;
// Can't use set_symbol_table because SymbolTable::cast knows that
// SymbolTable is a singleton and checks for identity.
roots_[kSymbolTableRootIndex] = new_table;
ASSERT(symbol != NULL);
return symbol;
}
@ -2703,10 +2801,11 @@ Object* Heap::LookupSymbol(Vector<const char> string) {
Object* Heap::LookupSymbol(String* string) {
if (string->IsSymbol()) return string;
Object* symbol = NULL;
Object* new_table =
SymbolTable::cast(symbol_table_)->LookupString(string, &symbol);
Object* new_table = symbol_table()->LookupString(string, &symbol);
if (new_table->IsFailure()) return new_table;
symbol_table_ = new_table;
// Can't use set_symbol_table because SymbolTable::cast knows that
// SymbolTable is a singleton and checks for identity.
roots_[kSymbolTableRootIndex] = new_table;
ASSERT(symbol != NULL);
return symbol;
}
@ -2717,8 +2816,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
*symbol = string;
return true;
}
SymbolTable* table = SymbolTable::cast(symbol_table_);
return table->LookupSymbolIfExists(string, symbol);
return symbol_table()->LookupSymbolIfExists(string, symbol);
}
@ -2805,28 +2903,15 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
void Heap::IterateRoots(ObjectVisitor* v) {
IterateStrongRoots(v);
v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_));
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
SYNCHRONIZE_TAG("symbol_table");
}
void Heap::IterateStrongRoots(ObjectVisitor* v) {
#define ROOT_ITERATE(type, name) \
v->VisitPointer(bit_cast<Object**, type**>(&name##_));
STRONG_ROOT_LIST(ROOT_ITERATE);
#undef ROOT_ITERATE
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
SYNCHRONIZE_TAG("strong_root_list");
#define STRUCT_MAP_ITERATE(NAME, Name, name) \
v->VisitPointer(bit_cast<Object**, Map**>(&name##_map_));
STRUCT_LIST(STRUCT_MAP_ITERATE);
#undef STRUCT_MAP_ITERATE
SYNCHRONIZE_TAG("struct_map");
#define SYMBOL_ITERATE(name, string) \
v->VisitPointer(bit_cast<Object**, String**>(&name##_));
SYMBOL_LIST(SYMBOL_ITERATE)
#undef SYMBOL_ITERATE
v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
SYNCHRONIZE_TAG("symbol");
@ -2901,6 +2986,7 @@ int Heap::PromotedSpaceSize() {
+ old_data_space_->Size()
+ code_space_->Size()
+ map_space_->Size()
+ cell_space_->Size()
+ lo_space_->Size();
}
@ -2978,6 +3064,13 @@ bool Heap::Setup(bool create_heap_objects) {
// enough to hold at least a page will cause it to allocate.
if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
// Setting up a paged space without giving it a virtual memory range big
// enough to hold at least a page will cause it to allocate.
if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
@ -3030,6 +3123,12 @@ void Heap::TearDown() {
map_space_ = NULL;
}
if (cell_space_ != NULL) {
cell_space_->TearDown();
delete cell_space_;
cell_space_ = NULL;
}
if (lo_space_ != NULL) {
lo_space_->TearDown();
delete lo_space_;
@ -3041,11 +3140,9 @@ void Heap::TearDown() {
void Heap::Shrink() {
// Try to shrink map, old, and code spaces.
map_space_->Shrink();
old_pointer_space_->Shrink();
old_data_space_->Shrink();
code_space_->Shrink();
// Try to shrink all paged spaces.
PagedSpaces spaces;
while (PagedSpace* space = spaces.next()) space->Shrink();
}
@ -3053,24 +3150,16 @@ void Heap::Shrink() {
void Heap::Protect() {
if (HasBeenSetup()) {
new_space_.Protect();
map_space_->Protect();
old_pointer_space_->Protect();
old_data_space_->Protect();
code_space_->Protect();
lo_space_->Protect();
AllSpaces spaces;
while (Space* space = spaces.next()) space->Protect();
}
}
void Heap::Unprotect() {
if (HasBeenSetup()) {
new_space_.Unprotect();
map_space_->Unprotect();
old_pointer_space_->Unprotect();
old_data_space_->Unprotect();
code_space_->Unprotect();
lo_space_->Unprotect();
AllSpaces spaces;
while (Space* space = spaces.next()) space->Unprotect();
}
}
@ -3108,6 +3197,8 @@ Space* AllSpaces::next() {
return Heap::code_space();
case MAP_SPACE:
return Heap::map_space();
case CELL_SPACE:
return Heap::cell_space();
case LO_SPACE:
return Heap::lo_space();
default:
@ -3126,6 +3217,8 @@ PagedSpace* PagedSpaces::next() {
return Heap::code_space();
case MAP_SPACE:
return Heap::map_space();
case CELL_SPACE:
return Heap::cell_space();
default:
return NULL;
}
@ -3199,6 +3292,9 @@ ObjectIterator* SpaceIterator::CreateIterator() {
case MAP_SPACE:
iterator_ = new HeapObjectIterator(Heap::map_space());
break;
case CELL_SPACE:
iterator_ = new HeapObjectIterator(Heap::cell_space());
break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(Heap::lo_space());
break;
@ -3303,7 +3399,7 @@ void HeapProfiler::WriteSample() {
// Lump all the string types together.
int string_number = 0;
int string_bytes = 0;
#define INCREMENT_SIZE(type, size, name) \
#define INCREMENT_SIZE(type, size, name, camel_name) \
string_number += info[type].number(); \
string_bytes += info[type].bytes();
STRING_TYPE_LIST(INCREMENT_SIZE)

295
deps/v8/src/heap.h

@ -35,103 +35,106 @@ namespace internal {
// Defines all the roots in Heap.
#define STRONG_ROOT_LIST(V) \
V(Map, meta_map) \
V(Map, heap_number_map) \
V(Map, short_string_map) \
V(Map, medium_string_map) \
V(Map, long_string_map) \
V(Map, short_ascii_string_map) \
V(Map, medium_ascii_string_map) \
V(Map, long_ascii_string_map) \
V(Map, short_symbol_map) \
V(Map, medium_symbol_map) \
V(Map, long_symbol_map) \
V(Map, short_ascii_symbol_map) \
V(Map, medium_ascii_symbol_map) \
V(Map, long_ascii_symbol_map) \
V(Map, short_cons_symbol_map) \
V(Map, medium_cons_symbol_map) \
V(Map, long_cons_symbol_map) \
V(Map, short_cons_ascii_symbol_map) \
V(Map, medium_cons_ascii_symbol_map) \
V(Map, long_cons_ascii_symbol_map) \
V(Map, short_sliced_symbol_map) \
V(Map, medium_sliced_symbol_map) \
V(Map, long_sliced_symbol_map) \
V(Map, short_sliced_ascii_symbol_map) \
V(Map, medium_sliced_ascii_symbol_map) \
V(Map, long_sliced_ascii_symbol_map) \
V(Map, short_external_symbol_map) \
V(Map, medium_external_symbol_map) \
V(Map, long_external_symbol_map) \
V(Map, short_external_ascii_symbol_map) \
V(Map, medium_external_ascii_symbol_map) \
V(Map, long_external_ascii_symbol_map) \
V(Map, short_cons_string_map) \
V(Map, medium_cons_string_map) \
V(Map, long_cons_string_map) \
V(Map, short_cons_ascii_string_map) \
V(Map, medium_cons_ascii_string_map) \
V(Map, long_cons_ascii_string_map) \
V(Map, short_sliced_string_map) \
V(Map, medium_sliced_string_map) \
V(Map, long_sliced_string_map) \
V(Map, short_sliced_ascii_string_map) \
V(Map, medium_sliced_ascii_string_map) \
V(Map, long_sliced_ascii_string_map) \
V(Map, short_external_string_map) \
V(Map, medium_external_string_map) \
V(Map, long_external_string_map) \
V(Map, short_external_ascii_string_map) \
V(Map, medium_external_ascii_string_map) \
V(Map, long_external_ascii_string_map) \
V(Map, undetectable_short_string_map) \
V(Map, undetectable_medium_string_map) \
V(Map, undetectable_long_string_map) \
V(Map, undetectable_short_ascii_string_map) \
V(Map, undetectable_medium_ascii_string_map) \
V(Map, undetectable_long_ascii_string_map) \
V(Map, byte_array_map) \
V(Map, fixed_array_map) \
V(Map, hash_table_map) \
V(Map, context_map) \
V(Map, catch_context_map) \
V(Map, global_context_map) \
V(Map, code_map) \
V(Map, oddball_map) \
V(Map, boilerplate_function_map) \
V(Map, shared_function_info_map) \
V(Map, proxy_map) \
V(Map, one_word_filler_map) \
V(Map, two_word_filler_map) \
V(Object, nan_value) \
V(Object, undefined_value) \
V(Object, minus_zero_value) \
V(Object, null_value) \
V(Object, true_value) \
V(Object, false_value) \
V(String, empty_string) \
V(FixedArray, empty_fixed_array) \
V(DescriptorArray, empty_descriptor_array) \
V(Object, the_hole_value) \
V(Map, neander_map) \
V(JSObject, message_listeners) \
V(Proxy, prototype_accessors) \
V(Dictionary, code_stubs) \
V(Dictionary, non_monomorphic_cache) \
V(Code, js_entry_code) \
V(Code, js_construct_entry_code) \
V(Code, c_entry_code) \
V(Code, c_entry_debug_break_code) \
V(FixedArray, number_string_cache) \
V(FixedArray, single_character_string_cache) \
V(FixedArray, natives_source_cache) \
V(Object, last_script_id)
V(Map, meta_map, MetaMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, short_string_map, ShortStringMap) \
V(Map, medium_string_map, MediumStringMap) \
V(Map, long_string_map, LongStringMap) \
V(Map, short_ascii_string_map, ShortAsciiStringMap) \
V(Map, medium_ascii_string_map, MediumAsciiStringMap) \
V(Map, long_ascii_string_map, LongAsciiStringMap) \
V(Map, short_symbol_map, ShortSymbolMap) \
V(Map, medium_symbol_map, MediumSymbolMap) \
V(Map, long_symbol_map, LongSymbolMap) \
V(Map, short_ascii_symbol_map, ShortAsciiSymbolMap) \
V(Map, medium_ascii_symbol_map, MediumAsciiSymbolMap) \
V(Map, long_ascii_symbol_map, LongAsciiSymbolMap) \
V(Map, short_cons_symbol_map, ShortConsSymbolMap) \
V(Map, medium_cons_symbol_map, MediumConsSymbolMap) \
V(Map, long_cons_symbol_map, LongConsSymbolMap) \
V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap) \
V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap) \
V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap) \
V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap) \
V(Map, medium_sliced_symbol_map, MediumSlicedSymbolMap) \
V(Map, long_sliced_symbol_map, LongSlicedSymbolMap) \
V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap) \
V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap) \
V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap) \
V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
V(Map, medium_external_symbol_map, MediumExternalSymbolMap) \
V(Map, long_external_symbol_map, LongExternalSymbolMap) \
V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
V(Map, medium_external_ascii_symbol_map, MediumExternalAsciiSymbolMap) \
V(Map, long_external_ascii_symbol_map, LongExternalAsciiSymbolMap) \
V(Map, short_cons_string_map, ShortConsStringMap) \
V(Map, medium_cons_string_map, MediumConsStringMap) \
V(Map, long_cons_string_map, LongConsStringMap) \
V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap) \
V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap) \
V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap) \
V(Map, short_sliced_string_map, ShortSlicedStringMap) \
V(Map, medium_sliced_string_map, MediumSlicedStringMap) \
V(Map, long_sliced_string_map, LongSlicedStringMap) \
V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap) \
V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap) \
V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap) \
V(Map, short_external_string_map, ShortExternalStringMap) \
V(Map, medium_external_string_map, MediumExternalStringMap) \
V(Map, long_external_string_map, LongExternalStringMap) \
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, medium_external_ascii_string_map, MediumExternalAsciiStringMap) \
V(Map, long_external_ascii_string_map, LongExternalAsciiStringMap) \
V(Map, undetectable_short_string_map, UndetectableShortStringMap) \
V(Map, undetectable_medium_string_map, UndetectableMediumStringMap) \
V(Map, undetectable_long_string_map, UndetectableLongStringMap) \
V(Map, undetectable_short_ascii_string_map, UndetectableShortAsciiStringMap) \
V(Map, \
undetectable_medium_ascii_string_map, \
UndetectableMediumAsciiStringMap) \
V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
V(Map, byte_array_map, ByteArrayMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, hash_table_map, HashTableMap) \
V(Map, context_map, ContextMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, proxy_map, ProxyMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, undefined_value, UndefinedValue) \
V(Object, minus_zero_value, MinusZeroValue) \
V(Object, null_value, NullValue) \
V(Object, true_value, TrueValue) \
V(Object, false_value, FalseValue) \
V(String, empty_string, EmptyString) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Object, the_hole_value, TheHoleValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Proxy, prototype_accessors, PrototypeAccessors) \
V(NumberDictionary, code_stubs, CodeStubs) \
V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
V(Code, c_entry_code, CEntryCode) \
V(Code, c_entry_debug_break_code, CEntryDebugBreakCode) \
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Object, last_script_id, LastScriptId)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
V(Object, symbol_table)
V(SymbolTable, symbol_table, SymbolTable)
#define SYMBOL_LIST(V) \
V(Array_symbol, "Array") \
@ -260,6 +263,7 @@ class Heap : public AllStatic {
static OldSpace* old_data_space() { return old_data_space_; }
static OldSpace* code_space() { return code_space_; }
static MapSpace* map_space() { return map_space_; }
static CellSpace* cell_space() { return cell_space_; }
static LargeObjectSpace* lo_space() { return lo_space_; }
static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
@ -288,6 +292,12 @@ class Heap : public AllStatic {
static Object* AllocateJSObject(JSFunction* constructor,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Returns failure if allocation failed.
@ -408,6 +418,12 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateByteArray(int length);
// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateJSGlobalPropertyCell(Object* value);
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@ -623,18 +639,29 @@ class Heap : public AllStatic {
global_gc_epilogue_callback_ = callback;
}
// Heap roots
#define ROOT_ACCESSOR(type, name) static type* name() { return name##_; }
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
} \
static inline type* raw_unchecked_##name() { \
return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
// Utility type maps
#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
static Map* name##_map() { return name##_map_; }
static inline Map* name##_map() { \
return Map::cast(roots_[k##Name##MapRootIndex]); \
}
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
#define SYMBOL_ACCESSOR(name, str) static String* name() { return name##_; }
#define SYMBOL_ACCESSOR(name, str) static inline String* name() { \
return String::cast(roots_[k##name##RootIndex]); \
}
SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
@ -679,11 +706,13 @@ class Heap : public AllStatic {
static inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
static void public_set_code_stubs(NumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
static void set_non_monomorphic_cache(Dictionary* value) {
non_monomorphic_cache_ = value;
static void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
}
// Update the next script id.
@ -824,6 +853,7 @@ class Heap : public AllStatic {
static OldSpace* old_data_space_;
static OldSpace* code_space_;
static MapSpace* map_space_;
static CellSpace* cell_space_;
static LargeObjectSpace* lo_space_;
static HeapState gc_state_;
@ -836,6 +866,13 @@ class Heap : public AllStatic {
static int mc_count_; // how many mark-compact collections happened
static int gc_count_; // how many gc happened
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline void set_##name(type* value) { \
roots_[k##camel_name##RootIndex] = value; \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
#ifdef DEBUG
static bool allocation_allowed_;
@ -870,20 +907,49 @@ class Heap : public AllStatic {
// last GC.
static int old_gen_exhausted_;
// Declare all the roots
#define ROOT_DECLARATION(type, name) static type* name##_;
ROOT_LIST(ROOT_DECLARATION)
#undef ROOT_DECLARATION
// Declare all the root indices.
enum RootListIndex {
#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) static Map* name##_map_;
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
#define SYMBOL_DECLARATION(name, str) static String* name##_;
SYMBOL_LIST(SYMBOL_DECLARATION)
#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
#undef SYMBOL_DECLARATION
kSymbolTableRootIndex,
kStrongRootListLength = kSymbolTableRootIndex,
kRootListLength
};
static Object* roots_[kRootListLength];
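A minimal sketch of the X-macro scheme this change introduces: one list macro expands once into enum indices and once into typed accessors over the single roots_ array, so the GC can visit every root with one loop instead of one macro expansion per list. Names and types below are illustrative:

#include <cassert>

struct Obj { int tag; };

#define DEMO_ROOT_LIST(V)       \
  V(Obj, nan_value, NanValue)   \
  V(Obj, undefined_value, UndefinedValue)

class DemoHeap {
 public:
  // Expansion 1: one enum index per root.
  enum RootListIndex {
#define ROOT_INDEX(type, name, camel) k##camel##RootIndex,
    DEMO_ROOT_LIST(ROOT_INDEX)
#undef ROOT_INDEX
    kRootListLength
  };

  // Expansion 2: typed getters/setters over the one flat array.
#define ROOT_ACCESSOR(type, name, camel)                               \
  static type* name() { return roots_[k##camel##RootIndex]; }          \
  static void set_##name(type* v) { roots_[k##camel##RootIndex] = v; }
  DEMO_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

  // The payoff: iterating roots is a single loop over the array.
  template <typename Visitor>
  static void IterateRoots(Visitor v) {
    for (int i = 0; i < kRootListLength; i++) v(roots_[i]);
  }

 private:
  static Obj* roots_[kRootListLength];
};

Obj* DemoHeap::roots_[DemoHeap::kRootListLength];

int main() {
  Obj nan{1};
  DemoHeap::set_nan_value(&nan);
  assert(DemoHeap::nan_value()->tag == 1);
  int count = 0;
  DemoHeap::IterateRoots([&](Obj* o) { (void)o; count++; });
  assert(count == DemoHeap::kRootListLength);
}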
struct StringTypeTable {
InstanceType type;
int size;
RootListIndex index;
};
struct ConstantSymbolTable {
const char* contents;
RootListIndex index;
};
struct StructTable {
InstanceType type;
int size;
RootListIndex index;
};
static const StringTypeTable string_type_table[];
static const ConstantSymbolTable constant_symbol_table[];
static const StructTable struct_table[];
// The special hidden symbol which is an empty string, but does not match
// any string when looked up in properties.
static String* hidden_symbol_;
@ -911,7 +977,10 @@ class Heap : public AllStatic {
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
static inline Object* AllocateRawMap(int size_in_bytes);
static inline Object* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
static inline Object* AllocateRawCell();
// Initializes a JSObject based on its map.
static void InitializeJSObjectFromMap(JSObject* obj,
@ -920,7 +989,15 @@ class Heap : public AllStatic {
static bool CreateInitialMaps();
static bool CreateInitialObjects();
// These four Create*EntryStub functions are here because of a gcc-4.4 bug
// that assigns wrong vtable entries.
static void CreateCEntryStub();
static void CreateCEntryDebugBreakStub();
static void CreateJSEntryStub();
static void CreateJSConstructEntryStub();
static void CreateFixedStubs();
static Object* CreateOddball(Map* map,
const char* to_string,
Object* to_number);
@ -1042,9 +1119,11 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
HeapObject* object = HeapObject::cast(*current);
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
#ifndef V8_TARGET_ARCH_X64
if (Heap::InNewSpace(object)) {
ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
}
#endif
}
}
}

14
deps/v8/src/ia32/assembler-ia32.cc

@ -114,8 +114,10 @@ void CpuFeatures::Probe() {
CodeDesc desc;
assm.GetCode(&desc);
Object* code =
Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
Object* code = Heap::CreateCode(desc,
NULL,
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!code->IsCode()) return;
LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
@ -919,6 +921,14 @@ void Assembler::idiv(Register src) {
}
void Assembler::imul(Register reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF7);
EMIT(0xE8 | reg.code());
}
void Assembler::imul(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;

9
deps/v8/src/ia32/assembler-ia32.h

@ -544,15 +544,18 @@ class Assembler : public Malloced {
void idiv(Register src);
void imul(Register dst, const Operand& src);
void imul(Register dst, Register src, int32_t imm32);
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
void inc(Register dst);
void inc(const Operand& dst);
void lea(Register dst, const Operand& src);
void mul(Register src);
// Unsigned multiply instruction.
void mul(Register src); // edx:eax = eax * reg.
void neg(Register dst);
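The edx:eax notation in the new comments means a widening 32x32 -> 64-bit multiply. A C++ restatement of the signed (imul) versus unsigned (mul) difference:

#include <cassert>
#include <cstdint>

int main() {
  int32_t a = -3, b = 100000000;
  // imul src: signed widening multiply, edx:eax = eax * src.
  int64_t s = static_cast<int64_t>(a) * b;
  // mul src: unsigned widening multiply, edx:eax = eax * src.
  uint64_t u = static_cast<uint64_t>(static_cast<uint32_t>(a)) *
               static_cast<uint32_t>(b);
  assert(s == -300000000LL);
  assert(u != static_cast<uint64_t>(s));  // differs once the sign matters
  return 0;
}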

137
deps/v8/src/ia32/codegen-ia32.cc

@ -1856,40 +1856,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
Major MajorKey() { return Compare; }
int MinorKey() {
// Encode the three parameters in a unique 16 bit value.
ASSERT(static_cast<int>(cc_) < (1 << 15));
return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
}
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch);
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
static_cast<int>(cc_),
strict_ ? "true" : "false");
}
#endif
};
void CodeGenerator::Comparison(Condition cc,
bool strict,
ControlDestination* dest) {
@ -4987,6 +4953,29 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// Get the frame pointer for the calling frame.
Result fp = allocator()->Allocate();
__ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
Immediate(ArgumentsAdaptorFrame::SENTINEL));
__ j(not_equal, &check_frame_marker);
__ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
fp.Unuse();
destination()->Split(equal);
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// ArgumentsAccessStub takes the parameter count as an input argument
@ -4999,6 +4988,70 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
JumpTarget leave, null, function, non_function_constructor;
Load(args->at(0)); // Load the object.
Result obj = frame_->Pop();
obj.ToRegister();
frame_->Spill(obj.reg());
// If the object is a smi, we return null.
__ test(obj.reg(), Immediate(kSmiTagMask));
null.Branch(zero);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
{ Result tmp = allocator()->Allocate();
__ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
__ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
__ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
null.Branch(less);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
// LAST_JS_OBJECT_TYPE.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ cmp(tmp.reg(), JS_FUNCTION_TYPE);
function.Branch(equal);
}
// Check if the constructor in the map is a function.
{ Result tmp = allocator()->Allocate();
__ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
__ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
non_function_constructor.Branch(not_equal);
}
// The map register now contains the constructor function. Grab the
// instance class name from there.
__ mov(obj.reg(),
FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
__ mov(obj.reg(),
FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
frame_->Push(&obj);
leave.Jump();
// Functions have class 'Function'.
function.Bind();
frame_->Push(Factory::function_class_symbol());
leave.Jump();
// Objects with a non-function constructor have class 'Object'.
non_function_constructor.Bind();
frame_->Push(Factory::Object_symbol());
leave.Jump();
// Non-JS objects have class null.
null.Bind();
frame_->Push(Factory::null_value());
// All done.
leave.Bind();
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
JumpTarget leave;
@ -7538,6 +7591,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ dec(Operand::StaticVariable(scope_depth));
}
// Make sure we're not trying to return 'the hole' from the runtime
// call as this may lead to crashes in the IC code later.
if (FLAG_debug_code) {
Label okay;
__ cmp(eax, Factory::the_hole_value());
__ j(not_equal, &okay);
__ int3();
__ bind(&okay);
}
// Check for failure result.
Label failure_returned;
ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
@ -7856,6 +7919,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
int CompareStub::MinorKey() {
// Encode the two parameters in a unique 16 bit value.
ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
}
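A worked example of this packing, with an illustrative helper name: cc_ == 5 and strict_ == true encode to 11, and the fields unpack with a shift and a mask:

#include <cassert>

int EncodeMinorKey(unsigned cc, bool strict) {
  return (cc << 1) | (strict ? 1 : 0);  // same layout as MinorKey() above
}

int main() {
  int key = EncodeMinorKey(5, true);
  assert(key == 11);
  assert((key >> 1) == 5 && (key & 1) == 1);  // decoding
}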
#undef __
} } // namespace v8::internal

6
deps/v8/src/ia32/codegen-ia32.h

@ -522,11 +522,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the value field of an object (used by Date).
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);

30
deps/v8/src/ia32/ic-ia32.cc

@ -66,9 +66,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Test the has_named_interceptor bit in the map.
__ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
// Jump to miss if the interceptor bit is set.
__ j(not_zero, miss_label, not_taken);
// Bail out if we have a JS global proxy object.
__ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
__ cmp(r0, JS_GLOBAL_PROXY_TYPE);
__ j(equal, miss_label, not_taken);
// Possible work-around for http://crbug.com/16276.
__ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
__ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
__ j(equal, miss_label, not_taken);
// Check that the properties array is a dictionary.
__ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
__ cmp(FieldOperand(r0, HeapObject::kMapOffset),
@ -77,7 +89,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Compute the capacity mask.
const int kCapacityOffset =
Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
__ mov(r2, FieldOperand(r0, kCapacityOffset));
__ shr(r2, kSmiTagSize); // convert smi to int
__ dec(r2);
@ -87,18 +99,18 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// cover ~93% of loads from dictionaries.
static const int kProbes = 4;
const int kElementsStartOffset =
Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kLengthOffset));
__ shr(r1, String::kHashShift);
if (i > 0) {
__ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
__ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(r1, Operand(r2));
// Scale the index by multiplying by the element size.
ASSERT(Dictionary::kElementSize == 3);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
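The probe sequence being generated, as a plain C++ sketch: attempt i lands on bucket (hash + i + i*i) & mask, and the bucket index is scaled by the entry size of 3 to address the dictionary's flat [key, value, details] triplets. The hash and capacity below are assumed example values:

#include <cstdio>

int main() {
  const unsigned kEntrySize = 3;      // StringDictionary::kEntrySize
  const unsigned mask = 16 - 1;       // capacity - 1, capacity a power of two
  unsigned hash = 29;                 // assumed example hash value
  for (unsigned i = 0; i < 4; i++) {  // kProbes == 4 above
    unsigned index = (hash + i + i * i) & mask;  // offsets 0, 2, 6, 12
    std::printf("probe %u -> bucket %u, array offset %u\n",
                i, index, index * kEntrySize);
  }
}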
@ -431,7 +443,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache.
Code::Flags flags =
Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax);
// If the stub cache probing failed, the receiver might be a value.
// For value objects, we use the map of the prototype objects for
@ -468,7 +480,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Probe the stub cache for the value object.
__ bind(&probe);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
__ bind(&miss);
@ -642,7 +654,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, eax, ecx, ebx);
StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
@ -872,7 +884,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));

519
deps/v8/src/ia32/stub-cache-ia32.cc

@ -41,12 +41,33 @@ static void ProbeTable(MacroAssembler* masm,
Code::Flags flags,
StubCache::Table table,
Register name,
Register offset) {
Register offset,
Register extra) {
ExternalReference key_offset(SCTableReference::keyReference(table));
ExternalReference value_offset(SCTableReference::valueReference(table));
Label miss;
if (extra.is_valid()) {
// Get the code entry from the cache.
__ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
// Check that the key in the entry matches the name.
__ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
__ j(not_equal, &miss, not_taken);
// Check that the flags match what we're looking for.
__ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
__ and_(offset, ~Code::kFlagsNotUsedInLookup);
__ cmp(offset, flags);
__ j(not_equal, &miss);
// Jump to the first instruction in the code stub.
__ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(Operand(extra));
__ bind(&miss);
} else {
// Save the offset on the stack.
__ push(offset);
@ -71,17 +92,19 @@ static void ProbeTable(MacroAssembler* masm,
__ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(Operand(offset));
// Miss: Restore offset and fall through.
// Pop at miss.
__ bind(&miss);
__ pop(offset);
}
}
void StubCache::GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch) {
Register scratch,
Register extra) {
Label miss;
// Make sure that code is valid. The shifting code relies on the
@ -94,6 +117,9 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Make sure that there are no register conflicts.
ASSERT(!scratch.is(receiver));
ASSERT(!scratch.is(name));
ASSERT(!extra.is(receiver));
ASSERT(!extra.is(name));
ASSERT(!extra.is(scratch));
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
@ -106,15 +132,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
// Probe the primary table.
ProbeTable(masm, flags, kPrimary, name, scratch);
ProbeTable(masm, flags, kPrimary, name, scratch, extra);
// Primary miss: Compute hash for secondary probe.
__ mov(scratch, FieldOperand(name, String::kLengthOffset));
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
__ sub(scratch, Operand(name));
__ add(Operand(scratch), Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
ProbeTable(masm, flags, kSecondary, name, scratch);
ProbeTable(masm, flags, kSecondary, name, scratch, extra);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
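A hedged C++ restatement of the two probes just emitted (table sizes below are illustrative; the real constants live in stub-cache.h): the primary index mixes the name's hash field, the receiver's map bits, and the code flags, and the secondary index is derived from the primary so the two tables rarely alias:

#include <cstdint>
#include <cstdio>

const uint32_t kPrimarySize = 2048;   // assumed sizes, powers of two
const uint32_t kSecondarySize = 512;

uint32_t Primary(uint32_t name_hash, uint32_t map_bits, uint32_t flags) {
  // hash field + receiver map, xored with the code flags, then masked.
  return ((name_hash + map_bits) ^ flags) & (kPrimarySize - 1);
}

uint32_t Secondary(uint32_t primary, uint32_t name_bits, uint32_t flags) {
  // Derived from the primary index so the two probes rarely collide.
  return (primary - name_bits + flags) & (kSecondarySize - 1);
}

int main() {
  uint32_t p = Primary(0x1234, 0xBEEF0, 0x11);
  uint32_t s = Secondary(p, 0x5678, 0x11);
  std::printf("primary %u secondary %u\n", p, s);
}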
@ -243,114 +273,6 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
void StubCompiler::GenerateLoadField(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss_label, not_taken);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Get the value from the properties.
GenerateFastPropertyLoad(masm, eax, reg, holder, index);
__ ret(0);
}
void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register name,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss_label, not_taken);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
__ push(receiver); // receiver
__ push(Immediate(Handle<AccessorInfo>(callback))); // callback data
__ push(name); // name
__ push(reg); // holder
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 4);
}
void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss_label, not_taken);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Return the constant value.
__ mov(eax, Handle<Object>(value));
__ ret(0);
}
void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
Register scratch2,
Label* miss_label) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss_label, not_taken);
// Check that the maps haven't changed.
Register reg =
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name); // name
// TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
// LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
__ push(Immediate(lookup_hint));
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
__ TailCallRuntime(load_ic_property, 4);
}
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
@ -444,10 +366,159 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
#undef __
#define __ ACCESS_MASM(masm())
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch,
String* name,
Label* miss) {
// Check that the maps haven't changed.
Register result =
masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
// If we've skipped any global objects, it's not enough to verify
// that their maps haven't changed.
while (object != holder) {
if (object->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(object);
Object* probe = global->EnsurePropertyCell(name);
if (probe->IsFailure()) {
set_failure(Failure::cast(probe));
return result;
}
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Immediate(Handle<Object>(cell)));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
Immediate(Factory::the_hole_value()));
__ j(not_equal, miss, not_taken);
}
object = JSObject::cast(object->GetPrototype());
}
// Return the register containing the holder.
return result;
}
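A sketch of the guard being emitted above: for every global object skipped along the prototype chain, the stub embeds that object's property cell and must miss as soon as the cell holds a real value, i.e. as soon as the property appears. kTheHole below is an assumed stand-in for V8's hole sentinel:

#include <cassert>

struct Cell { const void* value; };
const void* const kTheHole = &kTheHole;  // unique sentinel address

// The stub stays valid only while every skipped global still lacks the
// property; a non-hole cell value means a miss (fall back to the runtime).
bool PrototypeGuardHolds(const Cell* skipped_cells[], int n) {
  for (int i = 0; i < n; i++) {
    if (skipped_cells[i]->value != kTheHole) return false;
  }
  return true;
}

int main() {
  Cell a{kTheHole}, b{kTheHole};
  const Cell* cells[] = {&a, &b};
  assert(PrototypeGuardHolds(cells, 2));
  b.value = nullptr;  // the property got defined on a skipped global
  assert(!PrototypeGuardHolds(cells, 2));
}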
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check the prototype chain.
Register reg =
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
// Get the value from the properties.
GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
__ ret(0);
}
void StubCompiler::GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
__ push(receiver); // receiver
__ push(Immediate(Handle<AccessorInfo>(callback))); // callback data
__ push(name_reg); // name
__ push(reg); // holder
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 4);
}
void StubCompiler::GenerateLoadConstant(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
// Return the constant value.
__ mov(eax, Handle<Object>(value));
__ ret(0);
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
Smi* lookup_hint,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);
// Check that the maps haven't changed.
Register reg =
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
__ push(receiver); // receiver
__ push(reg); // holder
__ push(name_reg); // name
// TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
// LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
__ push(Immediate(lookup_hint));
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_ic_property =
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
__ TailCallRuntime(load_ic_property, 4);
}
// TODO(1241006): Avoid having lazy compile stubs specialized by the
// number of arguments. It is not needed anymore.
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@ -475,9 +546,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
Object* CallStubCompiler::CompileCallField(Object* object,
JSObject* holder,
int index,
String* name,
Code::Flags flags) {
ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
String* name) {
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
@ -492,7 +561,8 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, ecx, name, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@ -518,16 +588,15 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCodeWithFlags(flags, name);
return GetCode(FIELD, name);
}
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
CheckType check,
Code::Flags flags) {
ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
String* name,
CheckType check) {
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
@ -549,7 +618,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
switch (check) {
case RECEIVER_MAP_CHECK:
// Check that the maps haven't changed.
__ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, ecx, name, &miss);
// Patch the receiver on the stack with the global proxy if
// necessary.
@ -569,8 +639,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
ecx);
__ CheckMaps(JSObject::cast(object->GetPrototype()),
ecx, holder, ebx, edx, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
case NUMBER_CHECK: {
@ -585,8 +655,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateLoadGlobalFunctionPrototype(masm(),
Context::NUMBER_FUNCTION_INDEX,
ecx);
__ CheckMaps(JSObject::cast(object->GetPrototype()),
ecx, holder, ebx, edx, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
}
@ -602,13 +672,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
GenerateLoadGlobalFunctionPrototype(masm(),
Context::BOOLEAN_FUNCTION_INDEX,
ecx);
__ CheckMaps(JSObject::cast(object->GetPrototype()),
ecx, holder, ebx, edx, &miss);
CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
ebx, edx, name, &miss);
break;
}
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
__ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, ecx, name, &miss);
// Make sure object->elements()->map() != Heap::dictionary_array_map()
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@ -627,6 +698,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
@ -642,7 +714,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCodeWithFlags(flags, function_name);
return GetCode(CONSTANT_FUNCTION, function_name);
}
@ -665,7 +737,8 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Check that maps have not changed and compute the holder register.
Register reg =
masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, ecx, name, &miss);
// Enter an internal frame.
__ EnterInternalFrame();
@ -718,6 +791,70 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::call_global_inline, 1);
// Get the number of arguments.
const int argc = arguments().immediate();
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
}
// Check that the maps haven't changed.
CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
// Get the value from the cell.
__ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
__ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
__ j(not_equal, &miss, not_taken);
// Patch the receiver on the stack with the global proxy.
if (object->IsGlobalObject()) {
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
// Setup the context (function already in edi).
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments(),
RelocInfo::CODE_TARGET, JUMP_FUNCTION);
// Handle call cache miss.
__ bind(&miss);
__ DecrementCounter(&Counters::call_global_inline, 1);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, name);
}
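(Illustrative note: in plain JavaScript terms, the cell check above means the stub stays valid only while the global still holds the same closure; the names below are arbitrary.)
// Sketch: the call-global stub is specialized to one closure.
function f() { return 1; }
f();                            // stub caches the cell's current function
f = function () { return 2; };
f();                            // cell value changed: the cmp fails, stub misses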
Object* StoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@ -861,6 +998,44 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
}
Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::named_store_global_inline, 1);
// Check that the map of the global has not changed.
__ mov(ebx, (Operand(esp, kPointerSize)));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
// Store the value in the cell.
__ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
// Return the value (register eax).
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
__ DecrementCounter(&Counters::named_store_global_inline, 1);
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, name);
}
Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
@ -904,6 +1079,7 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
Object* LoadStubCompiler::CompileLoadField(JSObject* object,
JSObject* holder,
int index,
@ -916,7 +1092,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
Label miss;
__ mov(eax, (Operand(esp, kPointerSize)));
GenerateLoadField(masm(), object, holder, eax, ebx, edx, index, &miss);
GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -937,8 +1113,8 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
Label miss;
__ mov(eax, (Operand(esp, kPointerSize)));
GenerateLoadCallback(masm(), object, holder, eax, ecx, ebx,
edx, callback, &miss);
GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
callback, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -959,7 +1135,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Label miss;
__ mov(eax, (Operand(esp, kPointerSize)));
GenerateLoadConstant(masm(), object, holder, eax, ebx, edx, value, &miss);
GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@ -981,14 +1157,14 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ mov(eax, (Operand(esp, kPointerSize)));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(masm(),
receiver,
GenerateLoadInterceptor(receiver,
holder,
holder->InterceptorPropertyLookupHint(name),
eax,
ecx,
edx,
ebx,
name,
&miss);
__ bind(&miss);
@ -999,6 +1175,59 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
}
Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[4] : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::named_load_global_inline, 1);
// Get the receiver from the stack.
__ mov(eax, (Operand(esp, kPointerSize)));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
if (object != holder) {
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
}
// Check that the maps haven't changed.
CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
// Get the value from the cell.
__ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
__ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
__ cmp(eax, Factory::the_hole_value());
__ j(equal, &miss, not_taken);
} else if (FLAG_debug_code) {
__ cmp(eax, Factory::the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
__ ret(0);
__ bind(&miss);
__ DecrementCounter(&Counters::named_load_global_inline, 1);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
return GetCode(NORMAL, name);
}
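(Illustrative note: the hole check above is only reachable for deletable globals; a minimal JavaScript sketch of the observable behavior.)
// Deletable global: its cell can hold the hole after a delete.
this.g = 1;
delete this.g;   // later loads of g miss the stub and throw ReferenceError
// var bindings are DontDelete, so their cell is never holey.
var h = 2;
delete h;        // false in sloppy mode; h is still 2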
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
JSObject* receiver,
JSObject* holder,
@ -1018,7 +1247,8 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
GenerateLoadField(masm(), receiver, holder, ecx, ebx, edx, index, &miss);
GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_field, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1047,8 +1277,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
GenerateLoadCallback(masm(), receiver, holder, ecx, eax, ebx, edx,
callback, &miss);
GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
callback, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_callback, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1077,7 +1307,8 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
GenerateLoadConstant(masm(), receiver, holder, ecx, ebx, edx, value, &miss);
GenerateLoadConstant(receiver, holder, ecx, ebx, edx,
value, name, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_constant_function, 1);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@ -1105,14 +1336,14 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
GenerateLoadInterceptor(masm(),
receiver,
GenerateLoadInterceptor(receiver,
holder,
Smi::FromInt(JSObject::kLookupInHolder),
ecx,
eax,
edx,
ebx,
name,
&miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_interceptor, 1);

116
deps/v8/src/ic.cc

@ -265,6 +265,39 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
}
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
static void LookupForRead(Object* object,
String* name,
LookupResult* lookup) {
object->Lookup(name, lookup);
if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR) {
return;
}
JSObject* holder = lookup->holder();
if (HasInterceptorGetter(holder)) {
return;
}
// There is no getter; skip the interceptor and look up the property down the proto chain.
holder->LocalLookupRealNamedProperty(name, lookup);
if (lookup->IsValid()) {
return;
}
Object* proto = holder->GetPrototype();
if (proto == Heap::null_value()) {
return;
}
LookupForRead(proto, name, lookup);
}
Object* CallIC::TryCallAsFunction(Object* object) {
HandleScope scope;
Handle<Object> target(object);
@ -294,13 +327,11 @@ Object* CallIC::LoadFunction(State state,
return TypeError("non_object_property_call", object, name);
}
Object* result = Heap::the_hole_value();
// Check if the name is trivially convertible to an index and get
// the element if so.
uint32_t index;
if (name->AsArrayIndex(&index)) {
result = object->GetElement(index);
Object* result = object->GetElement(index);
if (result->IsJSFunction()) return result;
// Try to find a suitable function delegate for the object at hand.
@ -312,7 +343,7 @@ Object* CallIC::LoadFunction(State state,
// Lookup the property in the object.
LookupResult lookup;
object->Lookup(*name, &lookup);
LookupForRead(*object, *name, &lookup);
if (!lookup.IsValid()) {
// If the object does not have the requested property, check which
@ -328,11 +359,11 @@ Object* CallIC::LoadFunction(State state,
UpdateCaches(&lookup, state, object, name);
}
if (lookup.type() == INTERCEPTOR) {
// Get the property.
PropertyAttributes attr;
result = object->GetProperty(*name, &attr);
Object* result = object->GetProperty(*object, &lookup, *name, &attr);
if (result->IsFailure()) return result;
if (lookup.type() == INTERCEPTOR) {
// If the object does not have the requested property, check which
// exception we need to throw.
if (attr == ABSENT) {
@ -341,11 +372,6 @@ Object* CallIC::LoadFunction(State state,
}
return TypeError("undefined_method", object, name);
}
} else {
// Lookup is valid and no interceptors are involved. Get the
// property.
result = object->GetProperty(*name);
if (result->IsFailure()) return result;
}
ASSERT(result != Heap::the_hole_value());
@ -369,7 +395,7 @@ Object* CallIC::LoadFunction(State state,
// cause GC.
HandleScope scope;
Handle<JSFunction> function(JSFunction::cast(result));
Debug::HandleStepIn(function, fp(), false);
Debug::HandleStepIn(function, object, fp(), false);
return *function;
}
#endif
@ -423,17 +449,34 @@ void CallIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (lookup->holder()->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(lookup->holder());
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
if (!cell->value()->IsJSFunction()) return;
JSFunction* function = JSFunction::cast(cell->value());
code = StubCache::ComputeCallGlobal(argc,
in_loop,
*name,
*receiver,
global,
cell,
function);
} else {
// There is only one shared stub for calling normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
if (!object->IsJSObject()) return;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (lookup->holder() != *receiver) return;
code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
}
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeCallInterceptor(argc, *name, *object,
lookup->holder());
break;
@ -445,7 +488,7 @@ void CallIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
if (code->IsFailure()) return;
if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED ||
@ -520,7 +563,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
// Named lookup in the object.
LookupResult lookup;
object->Lookup(*name, &lookup);
LookupForRead(*object, *name, &lookup);
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
@ -614,12 +657,23 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
break;
}
case NORMAL: {
if (lookup->holder()->IsGlobalObject()) {
GlobalObject* global = GlobalObject::cast(lookup->holder());
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
code = StubCache::ComputeLoadGlobal(*name,
*receiver,
global,
cell,
lookup->IsDontDelete());
} else {
// There is only one shared stub for loading normalized
// properties. It does not traverse the prototype chain, so the
// property must be found in the receiver for the stub to be
// applicable.
if (lookup->holder() != *receiver) return;
code = StubCache::ComputeLoadNormal(*name, *receiver);
}
break;
}
case CALLBACKS: {
@ -632,6 +686,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeLoadInterceptor(*name, *receiver,
lookup->holder());
break;
@ -643,7 +698,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
if (code->IsFailure()) return;
if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
@ -723,7 +778,7 @@ Object* KeyedLoadIC::Load(State state,
// Named lookup.
LookupResult lookup;
object->Lookup(*name, &lookup);
LookupForRead(*object, *name, &lookup);
// If lookup is invalid, check if we need to throw an exception.
if (!lookup.IsValid()) {
@ -817,6 +872,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
break;
}
case INTERCEPTOR: {
ASSERT(HasInterceptorGetter(lookup->holder()));
code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver,
lookup->holder());
break;
@ -832,7 +888,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
if (code->IsFailure()) return;
if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache. Make
// sure to always rewrite from monomorphic to megamorphic.
@ -863,7 +919,7 @@ static bool StoreICableLookup(LookupResult* lookup) {
}
static bool LookupForStoreIC(JSObject* object,
static bool LookupForWrite(JSObject* object,
String* name,
LookupResult* lookup) {
object->LocalLookup(name, lookup);
@ -908,7 +964,7 @@ Object* StoreIC::Store(State state,
// Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup;
if (LookupForStoreIC(*receiver, *name, &lookup)) {
if (LookupForWrite(*receiver, *name, &lookup)) {
UpdateCaches(&lookup, state, receiver, name, value);
}
}
@ -953,6 +1009,19 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
code = StubCache::ComputeStoreField(*name, *receiver, index, *transition);
break;
}
case NORMAL: {
if (!receiver->IsGlobalObject()) {
return;
}
// The stub generated for the global object picks the value directly
// from the property cell. So the property must be directly on the
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
code = StubCache::ComputeStoreGlobal(*name, *global, cell);
break;
}
case CALLBACKS: {
if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
@ -961,6 +1030,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
break;
}
case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
code = StubCache::ComputeStoreInterceptor(*name, *receiver);
break;
}
@ -970,7 +1040,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
if (code->IsFailure()) return;
if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache.
if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
@ -1092,7 +1162,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
// If we're unable to compute the stub (not enough memory left), we
// simply avoid updating the caches.
if (code->IsFailure()) return;
if (code == NULL || code->IsFailure()) return;
// Patch the call site depending on the state of the cache. Make
// sure to always rewrite from monomorphic to megamorphic.

64
deps/v8/src/interpreter-irregexp.cc

@ -119,7 +119,7 @@ static void TraceInterpreter(const byte* code_base,
case BC_##name: \
TraceInterpreter(code_base, \
pc, \
backtrack_sp - backtrack_stack, \
backtrack_sp - backtrack_stack_base, \
current, \
current_char, \
BC_##name##_LENGTH, \
@ -142,6 +142,49 @@ static int32_t Load16Aligned(const byte* pc) {
}
// A simple abstraction over the backtracking stack used by the interpreter.
// This backtracking stack does not grow automatically, but it ensures that
// the memory held by the stack is released or remembered in a cache when
// matching terminates.
class BacktrackStack {
public:
explicit BacktrackStack() {
if (cache_ != NULL) {
// If the cache is not empty reuse the previously allocated stack.
data_ = cache_;
cache_ = NULL;
} else {
// Cache was empty. Allocate a new backtrack stack.
data_ = NewArray<int>(kBacktrackStackSize);
}
}
~BacktrackStack() {
if (cache_ == NULL) {
// The cache is empty. Keep this backtrack stack around.
cache_ = data_;
} else {
// A backtrack stack was already cached, just release this one.
DeleteArray(data_);
}
}
int* data() const { return data_; }
int max_size() const { return kBacktrackStackSize; }
private:
static const int kBacktrackStackSize = 10000;
int* data_;
static int* cache_;
DISALLOW_COPY_AND_ASSIGN(BacktrackStack);
};
int* BacktrackStack::cache_ = NULL;
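(Illustrative note: the same one-slot cache idiom, sketched in JavaScript with hypothetical names; the real class caches a 10000-entry int buffer.)
var cachedStack = null;                        // mirrors BacktrackStack::cache_
function acquireStack() {
  var s = (cachedStack !== null) ? cachedStack : new Int32Array(10000);
  cachedStack = null;
  return s;
}
function releaseStack(s) {
  if (cachedStack === null) cachedStack = s;   // remember at most one buffer
  // otherwise drop s and let it be reclaimed
}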
template <typename Char>
static bool RawMatch(const byte* code_base,
Vector<const Char> subject,
@ -149,10 +192,13 @@ static bool RawMatch(const byte* code_base,
int current,
uint32_t current_char) {
const byte* pc = code_base;
static const int kBacktrackStackSize = 10000;
int backtrack_stack[kBacktrackStackSize];
int backtrack_stack_space = kBacktrackStackSize;
int* backtrack_sp = backtrack_stack;
// BacktrackStack ensures that the memory allocated for the backtracking
// stack is returned to the system or kept in the one-slot cache when no
// stack is currently cached.
BacktrackStack backtrack_stack;
int* backtrack_stack_base = backtrack_stack.data();
int* backtrack_sp = backtrack_stack_base;
int backtrack_stack_space = backtrack_stack.max_size();
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
PrintF("\n\nStart bytecode interpreter\n\n");
@ -202,13 +248,13 @@ static bool RawMatch(const byte* code_base,
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
BYTECODE(SET_REGISTER_TO_SP)
registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack;
registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base;
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
BYTECODE(SET_SP_TO_REGISTER)
backtrack_sp = backtrack_stack + registers[insn >> BYTECODE_SHIFT];
backtrack_stack_space = kBacktrackStackSize -
(backtrack_sp - backtrack_stack);
backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
backtrack_stack_space = backtrack_stack.max_size() -
(backtrack_sp - backtrack_stack_base);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
BYTECODE(POP_CP)

116
deps/v8/src/jsregexp.cc

@ -51,6 +51,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#else
#error Unsupported target architecture.
#endif
#include "interpreter-irregexp.h"
@ -261,7 +263,6 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
// Irregexp implementation.
// Ensures that the regexp object contains a compiled version of the
// source for either ASCII or non-ASCII strings.
// If the compiled version doesn't already exist, it is compiled
@ -269,25 +270,26 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
// If compilation fails, an exception is thrown and this function
// returns false.
bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
int index;
if (is_ascii) {
index = JSRegExp::kIrregexpASCIICodeIndex;
} else {
index = JSRegExp::kIrregexpUC16CodeIndex;
#ifdef V8_NATIVE_REGEXP
if (re->DataAt(JSRegExp::code_index(is_ascii))->IsCode()) return true;
#else // ! V8_NATIVE_REGEXP (RegExp interpreter code)
if (re->DataAt(JSRegExp::code_index(is_ascii))->IsByteArray()) return true;
#endif
return CompileIrregexp(re, is_ascii);
}
Object* entry = re->DataAt(index);
if (!entry->IsTheHole()) {
// A value has already been compiled.
bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) {
// Compile the RegExp.
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
if (entry->IsJSObject()) {
// If it's a JS value, it's an error.
// If it's a JSObject, a previous compilation failed and threw this object.
// Re-throw the object without trying again.
Top::Throw(entry);
return false;
}
return true;
}
// Compile the RegExp.
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
ASSERT(entry->IsTheHole());
JSRegExp::Flags flags = re->GetFlags();
@ -300,7 +302,7 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
FlatStringReader reader(pattern);
if (!ParseRegExp(&reader, flags.is_multiline(), &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already parsed it successfully once.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
ThrowRegExpException(re,
pattern,
compile_data.error,
@ -323,17 +325,15 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) {
Handle<Object> regexp_err =
Factory::NewSyntaxError("malformed_regexp", array);
Top::Throw(*regexp_err);
re->SetDataAt(index, *regexp_err);
re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err);
return false;
}
NoHandleAllocation no_handles;
FixedArray* data = FixedArray::cast(re->data());
data->set(index, result.code);
int register_max = IrregexpMaxRegisterCount(data);
Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
data->set(JSRegExp::code_index(is_ascii), result.code);
int register_max = IrregexpMaxRegisterCount(*data);
if (result.num_registers > register_max) {
SetIrregexpMaxRegisterCount(data, result.num_registers);
SetIrregexpMaxRegisterCount(*data, result.num_registers);
}
return true;
@ -362,24 +362,12 @@ int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) {
ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
int index;
if (is_ascii) {
index = JSRegExp::kIrregexpASCIICodeIndex;
} else {
index = JSRegExp::kIrregexpUC16CodeIndex;
}
return ByteArray::cast(re->get(index));
return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii)));
}
Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
int index;
if (is_ascii) {
index = JSRegExp::kIrregexpASCIICodeIndex;
} else {
index = JSRegExp::kIrregexpUC16CodeIndex;
}
return Code::cast(re->get(index));
return Code::cast(re->get(JSRegExp::code_index(is_ascii)));
}
@ -406,12 +394,14 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
int number_of_capture_registers =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
#ifndef V8_NATIVE_REGEXP
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
String* pattern = jsregexp->Pattern();
PrintF("\n\nRegexp match: /%s/\n\n", *(pattern->ToCString()));
PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
}
#endif
#endif
if (!subject->IsFlat()) {
@ -420,16 +410,11 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
bool rc;
// We have to initialize this with something to make gcc happy but we can't
// initialize it with its real value until after the GC-causing things are
// over.
FixedArray* array = NULL;
Handle<FixedArray> array;
// Dispatch to the correct RegExp implementation.
Handle<String> original_subject = subject;
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
if (UseNativeRegexp()) {
#ifdef V8_NATIVE_REGEXP
#if V8_TARGET_ARCH_IA32
OffsetsVector captures(number_of_capture_registers);
int* captures_vector = captures.vector();
@ -455,20 +440,19 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
ASSERT(res == RegExpMacroAssemblerIA32::SUCCESS
|| res == RegExpMacroAssemblerIA32::FAILURE);
rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
if (!rc) return Factory::null_value();
if (res != RegExpMacroAssemblerIA32::SUCCESS) return Factory::null_value();
array = last_match_info->elements();
array = Handle<FixedArray>(last_match_info->elements());
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
SetCapture(array, i, captures_vector[i]);
SetCapture(array, i + 1, captures_vector[i + 1]);
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
}
#else // !V8_TARGET_ARCH_IA32
UNREACHABLE();
#endif
} else {
#endif // V8_TARGET_ARCH_IA32
#else // !V8_NATIVE_REGEXP
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
@ -484,24 +468,25 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
rc = IrregexpInterpreter::Match(byte_codes,
if (!IrregexpInterpreter::Match(byte_codes,
subject,
register_vector,
previous_index);
if (!rc) return Factory::null_value();
previous_index)) {
return Factory::null_value();
}
array = last_match_info->elements();
array = Handle<FixedArray>(last_match_info->elements());
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
SetCapture(array, i, register_vector[i]);
SetCapture(array, i + 1, register_vector[i + 1]);
}
SetCapture(*array, i, register_vector[i]);
SetCapture(*array, i + 1, register_vector[i + 1]);
}
#endif // V8_NATIVE_REGEXP
SetLastCaptureCount(array, number_of_capture_registers);
SetLastSubject(array, *original_subject);
SetLastInput(array, *original_subject);
SetLastCaptureCount(*array, number_of_capture_registers);
SetLastSubject(*array, *subject);
SetLastInput(*array, *subject);
return last_match_info;
}
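(Illustrative note: the (start, end+1) register layout in observable JavaScript terms.)
// "abcd".match(/b(c)/) matches "bc" over [1, 3) with capture "c" over [2, 3),
// so the capture registers read [1, 3, 2, 3].
"abcd".match(/b(c)/);   // ["bc", "c"], index 1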
@ -4472,11 +4457,13 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
NodeInfo info = *node->info();
if (RegExpImpl::UseNativeRegexp()) {
#ifdef V8_NATIVE_REGEXP
#ifdef V8_TARGET_ARCH_ARM
// ARM native regexp not implemented yet.
UNREACHABLE();
#endif
#ifdef V8_TARGET_ARCH_X64
// X64 native regexp not implemented yet.
UNREACHABLE();
#endif
#ifdef V8_TARGET_ARCH_IA32
@ -4493,14 +4480,15 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
data->capture_count,
pattern);
#endif
}
#else // ! V8_NATIVE_REGEXP
// Interpreted regexp.
EmbeddedVector<byte, 1024> codes;
RegExpMacroAssemblerIrregexp macro_assembler(codes);
return compiler.Assemble(&macro_assembler,
node,
data->capture_count,
pattern);
#endif // V8_NATIVE_REGEXP
}
}} // namespace v8::internal

11
deps/v8/src/jsregexp.h

@ -37,13 +37,15 @@ class RegExpMacroAssembler;
class RegExpImpl {
public:
static inline bool UseNativeRegexp() {
#ifdef V8_TARGET_ARCH_IA32
return FLAG_regexp_native;
// Whether V8 is compiled with native regexp support or not.
static bool UsesNativeRegExp() {
#ifdef V8_NATIVE_REGEXP
return true;
#else
return false;
#endif
}
// Creates a regular expression literal in the old space.
// This function calls the garbage collector if necessary.
static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor,
@ -148,7 +150,8 @@ class RegExpImpl {
static String* last_ascii_string_;
static String* two_byte_cached_string_;
static bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);
static bool CompileIrregexp(Handle<JSRegExp> re, bool is_ascii);
static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii);
// Set the subject cache. The previous string buffer is not deleted, so the

5
deps/v8/src/log.cc

@ -176,8 +176,11 @@ class Ticker: public Sampler {
~Ticker() { if (IsActive()) Stop(); }
void SampleStack(TickSample* sample) {
StackTracer::Trace(sample);
}
void Tick(TickSample* sample) {
if (IsProfiling()) StackTracer::Trace(sample);
if (profiler_) profiler_->Insert(sample);
if (window_) window_->AddState(sample->state);
}

2
deps/v8/src/macro-assembler.h

@ -47,6 +47,8 @@
#include "arm/assembler-arm-inl.h"
#include "code.h" // must be after assembler_*.h
#include "arm/macro-assembler-arm.h"
#else
#error Unsupported target architecture.
#endif
#endif // V8_MACRO_ASSEMBLER_H_

21
deps/v8/src/macros.py

@ -82,13 +82,16 @@ macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_REGEXP(arg) = %HasRegExpClass(arg);
macro IS_ARRAY(arg) = %HasArrayClass(arg);
macro IS_DATE(arg) = %HasDateClass(arg);
macro IS_NUMBER_WRAPPER(arg) = %HasNumberClass(arg);
macro IS_STRING_WRAPPER(arg) = %HasStringClass(arg);
macro IS_ERROR(arg) = (%ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%ClassOf(arg) === 'Script');
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro FLOOR(arg) = %Math_floor(arg);
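(Illustrative note: the %_ClassOf checks above line up with the class name that Object.prototype.toString reports.)
Object.prototype.toString.call(/x/);         // "[object RegExp]"
Object.prototype.toString.call(new Date());  // "[object Date]"
Object.prototype.toString.call([]);          // "[object Array]"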
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
@ -111,6 +114,10 @@ const REGEXP_FIRST_CAPTURE = 3;
# REGEXP_NUMBER_OF_CAPTURES
macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
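(Illustrative note: in user-visible terms the DATE_VALUE guard makes Date methods throw for non-Date receivers.)
Date.prototype.getTime.call(new Date(0));  // 0
Date.prototype.getTime.call({});           // TypeError via ThrowDateTypeError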
# Last input and last subject are after the captures so we can omit them on
# results returned from global searches. Beware - these evaluate their
# arguments twice.

185
deps/v8/src/mark-compact.cc

@ -56,6 +56,7 @@ int MarkCompactCollector::live_old_data_objects_ = 0;
int MarkCompactCollector::live_old_pointer_objects_ = 0;
int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_cell_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif
@ -155,6 +156,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
live_old_data_objects_ = 0;
live_code_objects_ = 0;
live_map_objects_ = 0;
live_cell_objects_ = 0;
live_lo_objects_ = 0;
#endif
}
@ -224,7 +226,9 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
if (reinterpret_cast<String*>(second) != Heap::empty_string()) return object;
if (second != Heap::raw_unchecked_empty_string()) {
return object;
}
// Since we don't have the object's start, it is impossible to update the
// remembered set. Therefore, we only replace the string with its left
@ -421,7 +425,7 @@ class SymbolTableCleaner : public ObjectVisitor {
}
}
// Set the entry to null_value (as deleted).
*p = Heap::null_value();
*p = Heap::raw_unchecked_null_value();
pointers_removed_++;
}
}
@ -475,7 +479,7 @@ void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
if (descriptors->IsMarked()) return;
// Empty descriptor array is marked as a root before any maps are marked.
ASSERT(descriptors != Heap::empty_descriptor_array());
ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
SetMark(descriptors);
FixedArray* contents = reinterpret_cast<FixedArray*>(
@ -590,7 +594,7 @@ void MarkCompactCollector::MarkSymbolTable() {
// and if it is a sliced string or a cons string backed by an
// external string (even indirectly), then the external string does
// not receive a weak reference callback.
SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
// Explicitly mark the prefix.
@ -708,6 +712,10 @@ void MarkCompactCollector::RefillMarkingStack() {
ScanOverflowedObjects(&map_it);
if (marking_stack.is_full()) return;
HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
ScanOverflowedObjects(&cell_it);
if (marking_stack.is_full()) return;
LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
ScanOverflowedObjects(&lo_it);
if (marking_stack.is_full()) return;
@ -780,10 +788,9 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessObjectGroups(root_visitor.stack_visitor());
// Prune the symbol table removing all symbols only pointed to by the
// symbol table. Cannot use SymbolTable::cast here because the symbol
// symbol table. Cannot use symbol_table() here because the symbol
// table is marked.
SymbolTable* symbol_table =
reinterpret_cast<SymbolTable*>(Heap::symbol_table());
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
SymbolTableCleaner v;
symbol_table->IterateElements(&v);
symbol_table->ElementsRemoved(v.PointersRemoved());
@ -808,6 +815,9 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
} else if (Heap::map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_++;
} else if (Heap::cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_++;
} else if (Heap::old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_++;
} else if (Heap::old_data_space()->Contains(obj)) {
@ -967,27 +977,32 @@ inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
int object_size) {
return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) {
inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
return Heap::old_data_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) {
inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
return Heap::code_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) {
inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
return Heap::map_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
return Heap::cell_space()->MCAllocateRaw(object_size);
}
// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
@ -1141,12 +1156,12 @@ static void SweepSpace(NewSpace* space) {
// We give non-live objects a map that will correctly give their size,
// since their existing map might not be live after the collection.
int size = object->Size();
if (size >= Array::kHeaderSize) {
object->set_map(Heap::byte_array_map());
if (size >= ByteArray::kHeaderSize) {
object->set_map(Heap::raw_unchecked_byte_array_map());
ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
} else {
ASSERT(size == kPointerSize);
object->set_map(Heap::one_word_filler_map());
object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
}
ASSERT(object->Size() == size);
}
@ -1196,8 +1211,8 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// loop.
}
// If the last region was not live we need to from free_start to the
// allocation top in the page.
// If the last region was not live we need to deallocate from
// free_start to the allocation top in the page.
if (!is_previous_alive) {
int free_size = p->AllocationTop() - free_start;
if (free_size > 0) {
@ -1241,6 +1256,21 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
}
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a);
}
}
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
@ -1261,6 +1291,11 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
LogNonLiveCodeObject>(
Heap::code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
IgnoreNonLiveObject>(
Heap::cell_space());
// Compute new space next to last after the old and code spaces have been
// compacted. Objects in new space can be promoted to old or code space.
EncodeForwardingAddressesInNewSpace();
@ -1279,6 +1314,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
Heap::old_data_space()->MCWriteRelocationInfoToPage();
Heap::code_space()->MCWriteRelocationInfoToPage();
Heap::map_space()->MCWriteRelocationInfoToPage();
Heap::cell_space()->MCWriteRelocationInfoToPage();
}
@ -1293,6 +1329,7 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
}
@ -1371,15 +1408,16 @@ class UpdatingVisitor: public ObjectVisitor {
ASSERT(!Heap::InFromSpace(obj));
if (Heap::new_space()->Contains(obj)) {
Address f_addr = Heap::new_space()->FromSpaceLow() +
Address forwarding_pointer_addr =
Heap::new_space()->FromSpaceLow() +
Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
new_addr = Memory::Address_at(f_addr);
new_addr = Memory::Address_at(forwarding_pointer_addr);
#ifdef DEBUG
ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
Heap::old_data_space()->Contains(new_addr) ||
Heap::code_space()->Contains(new_addr) ||
Heap::new_space()->FromSpaceContains(new_addr));
Heap::new_space()->FromSpaceContains(new_addr) ||
Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
if (Heap::new_space()->FromSpaceContains(new_addr)) {
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
@ -1392,32 +1430,19 @@ class UpdatingVisitor: public ObjectVisitor {
return;
} else {
ASSERT(Heap::old_pointer_space()->Contains(obj) ||
Heap::old_data_space()->Contains(obj) ||
Heap::code_space()->Contains(obj) ||
Heap::map_space()->Contains(obj));
new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
Heap::old_data_space()->Contains(new_addr) ||
Heap::code_space()->Contains(new_addr) ||
Heap::map_space()->Contains(new_addr));
#ifdef DEBUG
if (Heap::old_pointer_space()->Contains(obj)) {
ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr));
} else if (Heap::old_data_space()->Contains(obj)) {
ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr));
} else if (Heap::code_space()->Contains(obj)) {
ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
} else {
ASSERT(Heap::map_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::map_space()->MCSpaceOffsetForAddress(old_addr));
PagedSpaces spaces;
PagedSpace* original_space = spaces.next();
while (original_space != NULL) {
if (original_space->Contains(obj)) break;
original_space = spaces.next();
}
ASSERT(original_space != NULL);
#endif
new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
ASSERT(original_space->Contains(new_addr));
ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
original_space->MCSpaceOffsetForAddress(old_addr));
}
*p = HeapObject::FromAddress(new_addr);
@ -1449,6 +1474,8 @@ void MarkCompactCollector::UpdatePointers() {
&UpdatePointersInOldObject);
int live_codes = IterateLiveObjects(Heap::code_space(),
&UpdatePointersInOldObject);
int live_cells = IterateLiveObjects(Heap::cell_space(),
&UpdatePointersInOldObject);
int live_news = IterateLiveObjects(Heap::new_space(),
&UpdatePointersInNewObject);
@ -1460,15 +1487,14 @@ void MarkCompactCollector::UpdatePointers() {
USE(live_pointer_olds);
USE(live_data_olds);
USE(live_codes);
USE(live_cells);
USE(live_news);
#ifdef DEBUG
ASSERT(live_maps == live_map_objects_);
ASSERT(live_data_olds == live_old_data_objects_);
ASSERT(live_pointer_olds == live_old_pointer_objects_);
ASSERT(live_codes == live_code_objects_);
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
#endif
}
@ -1589,30 +1615,31 @@ void MarkCompactCollector::RelocateObjects() {
int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
&RelocateOldDataObject);
int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
USE(live_maps);
USE(live_data_olds);
USE(live_pointer_olds);
USE(live_codes);
USE(live_cells);
USE(live_news);
#ifdef DEBUG
ASSERT(live_maps == live_map_objects_);
ASSERT(live_data_olds == live_old_data_objects_);
ASSERT(live_pointer_olds == live_old_pointer_objects_);
ASSERT(live_codes == live_code_objects_);
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
#endif
// Notify code object in LO to convert IC target to address
// This must happen after lo_space_->Compact
LargeObjectIterator it(Heap::lo_space());
while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }
// Flips from and to spaces
// Flip from and to spaces
Heap::new_space()->Flip();
// Sets age_mark to bottom in to space
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
@ -1636,7 +1663,7 @@ int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// decode map pointer (forwarded address)
// Recover map pointer.
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
@ -1644,10 +1671,10 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// recover map pointer
// Reset map pointer. The meta map object may not be copied yet so
// Map::cast does not yet work.
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
// The meta map object may not be copied yet.
Address old_addr = obj->address();
if (new_addr != old_addr) {
@ -1664,23 +1691,23 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
}
static inline int RelocateOldObject(HeapObject* obj,
OldSpace* space,
static inline int RestoreMap(HeapObject* obj,
PagedSpace* space,
Address new_addr,
Address map_addr) {
// recover map pointer
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
// This must be a non-map object, and the function relies on the
// assumption that the Map space is compacted before the other paged
// spaces (see RelocateObjects).
// Reset map pointer.
obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
// This is a non-map object; it relies on the assumption that the Map space
// is compacted before the Old space (see RelocateObjects).
int obj_size = obj->Size();
ASSERT_OBJECT_SIZE(obj_size);
ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
space->MCSpaceOffsetForAddress(obj->address()));
space->MCAdjustRelocationEnd(new_addr, obj_size);
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("relocate %p -> %p\n", obj->address(), new_addr);
@ -1692,21 +1719,22 @@ static inline int RelocateOldObject(HeapObject* obj,
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
OldSpace* space) {
// decode map pointer (forwarded address)
PagedSpace* space) {
// Recover map pointer.
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer
// Get forwarding address before resetting map pointer.
Address new_addr = GetForwardingAddressInOldSpace(obj);
int obj_size = RelocateOldObject(obj, space, new_addr, map_addr);
// Reset the map pointer.
int obj_size = RestoreMap(obj, space, new_addr, map_addr);
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
memmove(new_addr, old_addr, obj_size); // Copy contents
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@ -1725,8 +1753,13 @@ int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
}
int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
return RelocateOldNonCodeObject(obj, Heap::cell_space());
}
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// decode map pointer (forwarded address)
// Recover map pointer.
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
@ -1734,23 +1767,23 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr);
// Reset the map pointer.
int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
// convert inline cache target to address using old address
// Convert inline cache target to address using old address.
if (obj->IsCode()) {
// convert target to address first related to old_address
Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
}
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
memmove(new_addr, old_addr, obj_size); // Copy contents.
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsCode()) {
// may also update inline cache target.
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
LOG(CodeMoveEvent(old_addr, new_addr));
@ -1770,15 +1803,15 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
Address new_addr =
Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
#ifdef DEBUG
if (Heap::new_space()->FromSpaceContains(new_addr)) {
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
OldSpace* target_space = Heap::TargetSpace(obj);
ASSERT(target_space == Heap::old_pointer_space() ||
target_space == Heap::old_data_space());
target_space->MCAdjustRelocationEnd(new_addr, obj_size);
ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
Heap::TargetSpace(obj) == Heap::old_data_space());
}
#endif
// New and old addresses cannot overlap.
memcpy(reinterpret_cast<void*>(new_addr),

10
deps/v8/src/mark-compact.h

@ -293,6 +293,7 @@ class MarkCompactCollector: public AllStatic {
static void DeallocateOldDataBlock(Address start, int size_in_bytes);
static void DeallocateCodeBlock(Address start, int size_in_bytes);
static void DeallocateMapBlock(Address start, int size_in_bytes);
static void DeallocateCellBlock(Address start, int size_in_bytes);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
@ -352,8 +353,12 @@ class MarkCompactCollector: public AllStatic {
static int RelocateOldPointerObject(HeapObject* obj);
static int RelocateOldDataObject(HeapObject* obj);
// Relocate a property cell object.
static int RelocateCellObject(HeapObject* obj);
// Helper function.
static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space);
static inline int RelocateOldNonCodeObject(HeapObject* obj,
PagedSpace* space);
// Relocates an object in the code space.
static int RelocateCodeObject(HeapObject* obj);
@ -393,6 +398,9 @@ class MarkCompactCollector: public AllStatic {
// Number of live objects in Heap::map_space_.
static int live_map_objects_;
// Number of live objects in Heap::cell_space_.
static int live_cell_objects_;
// Number of live objects in Heap::lo_space_.
static int live_lo_objects_;

26
deps/v8/src/math.js

@ -68,10 +68,12 @@ function MathAtan(x) {
}
// ECMA 262 - 15.8.2.5
function MathAtan2(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2(y, x) {
if (!IS_NUMBER(y)) y = ToNumber(y);
return %Math_atan2(x, y);
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_atan2(y, x);
}
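(Illustrative note: a quick check of the argument order, assuming valueOf-based coercion.)
var order = [];
var y = { valueOf: function () { order.push("y"); return 1; } };
var x = { valueOf: function () { order.push("x"); return 0; } };
Math.atan2(y, x);   // order is now ["y", "x"]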
// ECMA 262 - 15.8.2.6
@ -95,7 +97,9 @@ function MathExp(x) {
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (0 < x && x <= 0x7FFFFFFF) {
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
if (x <= 0x7FFFFFFF && x > 0) {
// Numbers in the range [0, 2^31) can be floored by converting
// them to an unsigned 32-bit value using the shift operator.
// We avoid doing so for -0, because the result of Math.floor(-0)
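(Illustrative note: the shift trick the comment describes; the guarded fast path itself lies outside this hunk.)
// For x in (0, 2^31), x >>> 0 truncates toward zero, i.e. floors positives.
3.7 >>> 0;         // 3
1000000.25 >>> 0;  // 1000000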
@ -115,11 +119,12 @@ function MathLog(x) {
// ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2
var r = -$Infinity;
for (var i = %_ArgumentsLength() - 1; i >= 0; --i) {
var length = %_ArgumentsLength();
for (var i = 0; i < length; i++) {
var n = ToNumber(%_Arguments(i));
if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is consider greater than -0.
if (n > r || (n === 0 && r === 0 && (1 / n) > (1 / r))) r = n;
// Make sure +0 is considered greater than -0.
if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n;
}
return r;
}
@ -127,11 +132,12 @@ function MathMax(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.12
function MathMin(arg1, arg2) { // length == 2
var r = $Infinity;
for (var i = %_ArgumentsLength() - 1; i >= 0; --i) {
var length = %_ArgumentsLength();
for (var i = 0; i < length; i++) {
var n = ToNumber(%_Arguments(i));
if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is consider less than +0.
if (n < r || (n === 0 && r === 0 && (1 / n) < (1 / r))) r = n;
// Make sure -0 is considered less than +0.
if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n;
}
return r;
}
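(Illustrative note: why both loops special-case zeros; +0 and -0 compare equal but must order differently, and 1/x tells them apart.)
1 / Math.max(-0, +0);  // Infinity  : max prefers +0
1 / Math.min(-0, +0);  // -Infinity : min prefers -0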

288
deps/v8/src/messages.js

@ -60,10 +60,8 @@ const kMessages = {
unexpected_token_string: "Unexpected string",
unexpected_token_identifier: "Unexpected identifier",
unexpected_eos: "Unexpected end of input",
expected_label: "Expected label",
malformed_regexp: "Invalid regular expression: /%0/: %1",
unterminated_regexp: "Invalid regular expression: missing /",
pcre_error: "PCRE function %0, error code %1",
regexp_flags: "Cannot supply flags when constructing one RegExp from another",
invalid_lhs_in_assignment: "Invalid left-hand side in assignment",
invalid_lhs_in_for_in: "Invalid left-hand side in for-in",
@ -74,21 +72,17 @@ const kMessages = {
redeclaration: "%0 '%1' has already been declared",
no_catch_or_finally: "Missing catch or finally after try",
unknown_label: "Undefined label '%0'",
invalid_break: "Invalid break statement",
invalid_continue: "Invalid continue statement",
uncaught_exception: "Uncaught %0",
stack_trace: "Stack Trace:\n%0",
called_non_callable: "%0 is not a function",
undefined_method: "Object %1 has no method '%0'",
property_not_function: "Property '%0' of object %1 is not a function",
null_or_undefined: "Cannot access property of null or undefined",
cannot_convert_to_primitive: "Cannot convert object to primitive value",
not_constructor: "%0 is not a constructor",
not_defined: "%0 is not defined",
non_object_property_load: "Cannot read property '%0' of %1",
non_object_property_store: "Cannot set property '%0' of %1",
non_object_property_call: "Cannot call method '%0' of %1",
illegal_eval: "Unsupported indirect eval() call",
with_expression: "%0 has no properties",
illegal_invocation: "Illegal invocation",
no_setter_in_callback: "Cannot set property %0 of %1 which has only a getter",
@@ -101,13 +95,11 @@ const kMessages = {
reduce_no_initial: "Reduce of empty array with no initial value",
// RangeError
invalid_array_length: "Invalid array length",
invalid_array_apply_length: "Function.prototype.apply supports only up to 1024 arguments",
stack_overflow: "Maximum call stack size exceeded",
apply_overflow: "Function.prototype.apply cannot support %0 arguments",
// SyntaxError
unable_to_parse: "Parse error",
duplicate_regexp_flag: "Duplicate RegExp flag %0",
unrecognized_regexp_flag: "Unrecognized RegExp flag %0",
invalid_regexp: "Invalid RegExp pattern /%0/",
illegal_break: "Illegal break statement",
illegal_continue: "Illegal continue statement",
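These kMessages templates are expanded by FormatMessage, with %0, %1, ... replaced by the message arguments; a sketch of how two of them surface to script code:
try { null.foo; } catch (e) {
  e.message;  // "Cannot read property 'foo' of null"  (non_object_property_load)
}
try { undefined(); } catch (e) {
  e.message;  // "undefined is not a function"  (called_non_callable)
}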
@@ -557,54 +549,8 @@ function MakeMessage(type, args, startPos, endPos, script, stackTrace) {
function GetStackTraceLine(recv, fun, pos, isGlobal) {
try {
return UnsafeGetStackTraceLine(recv, fun, pos, isGlobal);
} catch (e) {
return "<error: " + e + ">";
}
}
function GetFunctionName(fun, recv) {
var name = %FunctionGetName(fun);
if (name) return name;
for (var prop in recv) {
if (recv[prop] === fun)
return prop;
return FormatSourcePosition(new CallSite(recv, fun, pos));
}
return "[anonymous]";
}
function UnsafeGetStackTraceLine(recv, fun, pos, isTopLevel) {
var result = "";
// The global frame has no meaningful function or receiver
if (!isTopLevel) {
// If the receiver is not the global object then prefix the
// message send
if (recv !== global)
result += ToDetailString(recv) + ".";
result += GetFunctionName(fun, recv);
}
if (pos != -1) {
var script = %FunctionGetScript(fun);
var file;
if (script) {
file = %FunctionGetScript(fun).data;
}
if (file) {
var location = %FunctionGetScript(fun).locationFromPosition(pos, true);
if (!isTopLevel) result += "(";
result += file;
if (location != null) {
result += ":" + (location.line + 1) + ":" + (location.column + 1);
}
if (!isTopLevel) result += ")";
}
}
return (result) ? " at " + result : result;
}
// ----------------------------------------------------------------------------
// Error implementation
@@ -632,6 +578,226 @@ function DefineOneShotAccessor(obj, name, fun) {
});
}
function CallSite(receiver, fun, pos) {
this.receiver = receiver;
this.fun = fun;
this.pos = pos;
}
CallSite.prototype.getThis = function () {
return this.receiver;
};
CallSite.prototype.getTypeName = function () {
var constructor = this.receiver.constructor;
if (!constructor)
return $Object.prototype.toString.call(this.receiver);
var constructorName = constructor.name;
if (!constructorName)
return $Object.prototype.toString.call(this.receiver);
return constructorName;
};
CallSite.prototype.isToplevel = function () {
if (this.receiver == null)
return true;
return IS_GLOBAL(this.receiver);
};
CallSite.prototype.isEval = function () {
var script = %FunctionGetScript(this.fun);
return script && script.compilation_type == 1;
};
CallSite.prototype.getEvalOrigin = function () {
var script = %FunctionGetScript(this.fun);
if (!script || script.compilation_type != 1)
return null;
return new CallSite(null, script.eval_from_function,
script.eval_from_position);
};
CallSite.prototype.getFunction = function () {
return this.fun;
};
CallSite.prototype.getFunctionName = function () {
// See if the function knows its own name
var name = this.fun.name;
if (name) {
return name;
} else {
return %FunctionGetInferredName(this.fun);
}
// Maybe this is an evaluation?
var script = %FunctionGetScript(this.fun);
if (script && script.compilation_type == 1)
return "eval";
return null;
};
CallSite.prototype.getMethodName = function () {
// See if we can find a unique property on the receiver that holds
// this function.
var ownName = this.fun.name;
if (ownName && this.receiver && this.receiver[ownName] === this.fun)
// To handle DontEnum properties we guess that the method has
// the same name as the function.
return ownName;
var name = null;
for (var prop in this.receiver) {
if (this.receiver[prop] === this.fun) {
// If we find more than one match bail out to avoid confusion
if (name)
return null;
name = prop;
}
}
if (name)
return name;
return null;
};
CallSite.prototype.getFileName = function () {
var script = %FunctionGetScript(this.fun);
return script ? script.name : null;
};
CallSite.prototype.getLineNumber = function () {
if (this.pos == -1)
return null;
var script = %FunctionGetScript(this.fun);
var location = null;
if (script) {
location = script.locationFromPosition(this.pos, true);
}
return location ? location.line + 1 : null;
};
CallSite.prototype.getColumnNumber = function () {
if (this.pos == -1)
return null;
var script = %FunctionGetScript(this.fun);
var location = null;
if (script) {
location = script.locationFromPosition(this.pos, true);
}
return location ? location.column : null;
};
CallSite.prototype.isNative = function () {
var script = %FunctionGetScript(this.fun);
return script ? (script.type == 0) : false;
};
CallSite.prototype.getPosition = function () {
return this.pos;
};
CallSite.prototype.isConstructor = function () {
var constructor = this.receiver ? this.receiver.constructor : null;
if (!constructor)
return false;
return this.fun === constructor;
};
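Together these methods form the structured stack-frame API handed to a user-supplied Error.prepareStackTrace hook (consulted by FormatRawStackTrace below). A sketch, with Widget and poke as hypothetical names:
Error.prepareStackTrace = function (err, frames) { return frames; };
function Widget() {}
Widget.prototype.poke = function poke() { return new Error("probe").stack; };
var site = new Widget().poke()[0];  // topmost CallSite
site.getTypeName();      // "Widget" -- from receiver.constructor.name
site.getFunctionName();  // "poke"
site.isToplevel();       // false: the receiver is a Widget, not the global object
site.isConstructor();    // false: poke is not receiver.constructor
Error.prepareStackTrace = undefined;  // restore the default string formatting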
function FormatSourcePosition(frame) {
var fileLocation = "";
if (frame.isNative()) {
fileLocation = "native";
} else if (frame.isEval()) {
fileLocation = "eval at " + FormatSourcePosition(frame.getEvalOrigin());
} else {
var fileName = frame.getFileName();
if (fileName) {
fileLocation += fileName;
var lineNumber = frame.getLineNumber();
if (lineNumber != null) {
fileLocation += ":" + lineNumber;
var columnNumber = frame.getColumnNumber();
if (columnNumber) {
fileLocation += ":" + columnNumber;
}
}
}
}
if (!fileLocation) {
fileLocation = "unknown source";
}
var line = "";
var functionName = frame.getFunction().name;
var methodName = frame.getMethodName();
var addPrefix = true;
var isConstructor = frame.isConstructor();
var isMethodCall = !(frame.isToplevel() || isConstructor);
if (isMethodCall) {
line += frame.getTypeName() + ".";
if (functionName) {
line += functionName;
if (methodName && (methodName != functionName)) {
line += " [as " + methodName + "]";
}
} else {
line += methodName || "<anonymous>";
}
} else if (isConstructor) {
line += "new " + (functionName || "<anonymous>");
} else if (functionName) {
line += functionName;
} else {
line += fileLocation;
addPrefix = false;
}
if (addPrefix) {
line += " (" + fileLocation + ")";
}
return line;
}
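The one-line formats this produces, sketched with a hypothetical /tmp/widget.js:
// "Widget.poke [as jab] (/tmp/widget.js:3:12)"      -- method call under an aliased name
// "new Widget (/tmp/widget.js:7:9)"                 -- constructor frame
// "poke (eval at <anonymous> (/tmp/widget.js:1:1))" -- function compiled by eval
// "/tmp/widget.js:9:1"                              -- top-level code, no prefix added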
function FormatStackTrace(error, frames) {
var lines = [];
try {
lines.push(error.toString());
} catch (e) {
try {
lines.push("<error: " + e + ">");
} catch (ee) {
lines.push("<error>");
}
}
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
try {
var line = FormatSourcePosition(frame);
} catch (e) {
try {
var line = "<error: " + e + ">";
} catch (ee) {
// Any code that reaches this point is seriously nasty!
var line = "<error>";
}
}
lines.push(" at " + line);
}
return lines.join("\n");
}
function FormatRawStackTrace(error, raw_stack) {
var frames = [ ];
for (var i = 0; i < raw_stack.length; i += 3) {
var recv = raw_stack[i];
var fun = raw_stack[i+1];
var pc = raw_stack[i+2];
var pos = %FunctionGetPositionForOffset(fun, pc);
frames.push(new CallSite(recv, fun, pos));
}
if (IS_FUNCTION($Error.prepareStackTrace)) {
return $Error.prepareStackTrace(error, frames);
} else {
return FormatStackTrace(error, frames);
}
}
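When no prepareStackTrace hook is installed, the default path above yields the familiar string form; a quick sketch:
function f() { return new Error("boom").stack; }
var s = f();
typeof s === "string";               // true
s.split("\n")[0] === "Error: boom";  // first line is error.toString()
/^\s*at /.test(s.split("\n")[1]);    // true: one indented frame line per CallSite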
function DefineError(f) {
// Store the error function in both the global object
// and the runtime object. The function is fetched
@@ -659,7 +825,7 @@ function DefineError(f) {
%SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
f.prototype.name = name;
%SetCode(f, function(m) {
if (%IsConstructCall()) {
if (%_IsConstructCall()) {
if (m === kAddMessageAccessorsMarker) {
DefineOneShotAccessor(this, 'message', function (obj) {
return FormatMessage({type: obj.type, args: obj.arguments});
@ -667,6 +833,16 @@ function DefineError(f) {
} else if (!IS_UNDEFINED(m)) {
this.message = ToString(m);
}
var stackTraceLimit = $Error.stackTraceLimit;
if (stackTraceLimit) {
// Cap the limit to avoid extremely big traces
if (stackTraceLimit < 0 || stackTraceLimit > 10000)
stackTraceLimit = 10000;
var raw_stack = %CollectStackTrace(f, stackTraceLimit);
DefineOneShotAccessor(this, 'stack', function (obj) {
return FormatRawStackTrace(obj, raw_stack);
});
}
} else {
return new f(m);
}
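The limit is read off $Error each time an error is constructed, so it can be tuned, raised, or zeroed at any point; a sketch:
Error.stackTraceLimit = 2;      // collect at most two frames from here on
var shallow = new Error("e");   // shallow.stack describes <= 2 call sites
Error.stackTraceLimit = 20000;  // out-of-range values are capped to 10000 above
Error.stackTraceLimit = 0;      // falsy: no 'stack' accessor is defined at all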

2
deps/v8/src/mirror-delay.js

@@ -580,7 +580,7 @@ inherits(ObjectMirror, ValueMirror);
ObjectMirror.prototype.className = function() {
return %ClassOf(this.value_);
return %_ClassOf(this.value_);
};

99
deps/v8/src/objects-debug.cc

@@ -152,7 +152,9 @@ void HeapObject::HeapObjectPrint() {
case SHARED_FUNCTION_INFO_TYPE:
SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint();
break;
case JS_GLOBAL_PROPERTY_CELL_TYPE:
JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint();
break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
Name::cast(this)->Name##Print(); \
@@ -214,6 +216,9 @@ void HeapObject::HeapObjectVerify() {
case JS_BUILTINS_OBJECT_TYPE:
JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify();
break;
case JS_GLOBAL_PROPERTY_CELL_TYPE:
JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify();
break;
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify();
break;
@@ -266,29 +271,38 @@ void ByteArray::ByteArrayVerify() {
void JSObject::PrintProperties() {
if (HasFastProperties()) {
for (DescriptorReader r(map()->instance_descriptors());
!r.eos();
r.advance()) {
DescriptorArray* descs = map()->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PrintF(" ");
r.GetKey()->StringPrint();
descs->GetKey(i)->StringPrint();
PrintF(": ");
if (r.type() == FIELD) {
FastPropertyAt(r.GetFieldIndex())->ShortPrint();
PrintF(" (field at offset %d)\n", r.GetFieldIndex());
} else if (r.type() == CONSTANT_FUNCTION) {
r.GetConstantFunction()->ShortPrint();
switch (descs->GetType(i)) {
case FIELD: {
int index = descs->GetFieldIndex(i);
FastPropertyAt(index)->ShortPrint();
PrintF(" (field at offset %d)\n", index);
break;
}
case CONSTANT_FUNCTION:
descs->GetConstantFunction(i)->ShortPrint();
PrintF(" (constant function)\n");
} else if (r.type() == CALLBACKS) {
r.GetCallbacksObject()->ShortPrint();
break;
case CALLBACKS:
descs->GetCallbacksObject(i)->ShortPrint();
PrintF(" (callback)\n");
} else if (r.type() == MAP_TRANSITION) {
break;
case MAP_TRANSITION:
PrintF(" (map transition)\n");
} else if (r.type() == CONSTANT_TRANSITION) {
break;
case CONSTANT_TRANSITION:
PrintF(" (constant transition)\n");
} else if (r.type() == NULL_DESCRIPTOR) {
break;
case NULL_DESCRIPTOR:
PrintF(" (null descriptor)\n");
} else {
break;
default:
UNREACHABLE();
break;
}
}
} else {
@@ -392,6 +406,7 @@ static const char* TypeToString(InstanceType type) {
case JS_OBJECT_TYPE: return "JS_OBJECT";
case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
case ODDBALL_TYPE: return "ODDBALL";
case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
@@ -428,6 +443,9 @@ void Map::MapPrint() {
if (is_undetectable()) {
PrintF(" - undetectable\n");
}
if (needs_loading()) {
PrintF(" - needs_loading\n");
}
if (has_instance_call_handler()) {
PrintF(" - instance_call_handler\n");
}
@@ -653,6 +671,17 @@ void Oddball::OddballVerify() {
}
void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() {
CHECK(IsJSGlobalPropertyCell());
VerifyObjectField(kValueOffset);
}
void JSGlobalPropertyCell::JSGlobalPropertyCellPrint() {
HeapObject::PrintHeader("JSGlobalPropertyCell");
}
void Code::CodePrint() {
HeapObject::PrintHeader("Code");
#ifdef ENABLE_DISASSEMBLER
@@ -694,7 +723,7 @@ void JSRegExp::JSRegExpVerify() {
break;
}
case JSRegExp::IRREGEXP: {
bool is_native = RegExpImpl::UseNativeRegexp();
bool is_native = RegExpImpl::UsesNativeRegExp();
FixedArray* arr = FixedArray::cast(data());
Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
@@ -725,25 +754,6 @@ void Proxy::ProxyVerify() {
}
void Dictionary::Print() {
int capacity = Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = KeyAt(i);
if (IsKey(k)) {
PrintF(" ");
if (k->IsString()) {
String::cast(k)->StringPrint();
} else {
k->ShortPrint();
}
PrintF(": ");
ValueAt(i)->ShortPrint();
PrintF("\n");
}
}
}
void AccessorInfo::AccessorInfoVerify() {
CHECK(IsAccessorInfo());
VerifyPointer(getter());
@@ -997,7 +1007,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
info->number_of_fast_unused_fields_ += map()->unused_property_fields();
} else {
Dictionary* dict = property_dictionary();
StringDictionary* dict = property_dictionary();
info->number_of_slow_used_properties_ += dict->NumberOfElements();
info->number_of_slow_unused_properties_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -1014,7 +1024,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_elements_ += len - holes;
info->number_of_fast_unused_elements_ += holes;
} else {
Dictionary* dict = element_dictionary();
NumberDictionary* dict = element_dictionary();
info->number_of_slow_used_elements_ += dict->NumberOfElements();
info->number_of_slow_unused_elements_ +=
dict->Capacity() - dict->NumberOfElements();
@@ -1061,11 +1071,10 @@ void JSObject::SpillInformation::Print() {
void DescriptorArray::PrintDescriptors() {
PrintF("Descriptor array %d\n", number_of_descriptors());
int number = 0;
for (DescriptorReader r(this); !r.eos(); r.advance()) {
for (int i = 0; i < number_of_descriptors(); i++) {
PrintF(" %d: ", i);
Descriptor desc;
r.Get(&desc);
PrintF(" %d: ", number++);
Get(i, &desc);
desc.Print();
}
PrintF("\n");
@@ -1075,14 +1084,14 @@ void DescriptorArray::PrintDescriptors() {
bool DescriptorArray::IsSortedNoDuplicates() {
String* current_key = NULL;
uint32_t current = 0;
for (DescriptorReader r(this); !r.eos(); r.advance()) {
String* key = r.GetKey();
for (int i = 0; i < number_of_descriptors(); i++) {
String* key = GetKey(i);
if (key == current_key) {
PrintDescriptors();
return false;
}
current_key = key;
uint32_t hash = r.GetKey()->Hash();
uint32_t hash = GetKey(i)->Hash();
if (hash < current) {
PrintDescriptors();
return false;

132
deps/v8/src/objects-inl.h

@@ -53,6 +53,13 @@ Smi* PropertyDetails::AsSmi() {
}
PropertyDetails PropertyDetails::AsDeleted() {
PropertyDetails d(DONT_ENUM, NORMAL);
Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1));
return PropertyDetails(smi);
}
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
ASSERT(object->Is##type()); \
@@ -409,6 +416,13 @@ bool Object::IsOddball() {
}
bool Object::IsJSGlobalPropertyCell() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type()
== JS_GLOBAL_PROPERTY_CELL_TYPE;
}
bool Object::IsSharedFunctionInfo() {
return Object::IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
@@ -467,7 +481,7 @@ bool Object::IsDictionary() {
bool Object::IsSymbolTable() {
return IsHashTable() && this == Heap::symbol_table();
return IsHashTable() && this == Heap::raw_unchecked_symbol_table();
}
@@ -685,7 +699,7 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
int Smi::value() {
return static_cast<int>(reinterpret_cast<intptr_t>(this) >> kSmiTagSize);
return static_cast<int>(reinterpret_cast<intptr_t>(this)) >> kSmiTagSize;
}
@@ -754,6 +768,8 @@ int Failure::value() const {
Failure* Failure::RetryAfterGC(int requested_bytes) {
// Assert that the space encoding fits in the three bytes allotted for it.
ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
int requested = requested_bytes >> kObjectAlignmentBits;
int value = (requested << kSpaceTagSize) | NEW_SPACE;
ASSERT(value >> kSpaceTagSize == requested);
@@ -1046,6 +1062,18 @@ ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
Object* JSGlobalPropertyCell::value() {
return READ_FIELD(this, kValueOffset);
}
void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
}
int JSObject::GetHeaderSize() {
switch (map()->instance_type()) {
case JS_GLOBAL_PROXY_TYPE:
@@ -1323,6 +1351,56 @@ Smi* DescriptorArray::GetDetails(int descriptor_number) {
}
PropertyType DescriptorArray::GetType(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return PropertyDetails(GetDetails(descriptor_number)).type();
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
return Descriptor::IndexFromValue(GetValue(descriptor_number));
}
JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
return JSFunction::cast(GetValue(descriptor_number));
}
Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
ASSERT(GetType(descriptor_number) == CALLBACKS);
return GetValue(descriptor_number);
}
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
ASSERT(GetType(descriptor_number) == CALLBACKS);
Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
return reinterpret_cast<AccessorDescriptor*>(p->proxy());
}
bool DescriptorArray::IsProperty(int descriptor_number) {
return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
}
bool DescriptorArray::IsTransition(int descriptor_number) {
PropertyType t = GetType(descriptor_number);
return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
}
bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
return GetType(descriptor_number) == NULL_DESCRIPTOR;
}
bool DescriptorArray::IsDontEnum(int descriptor_number) {
return PropertyDetails(GetDetails(descriptor_number)).IsDontEnum();
}
void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
desc->Init(GetKey(descriptor_number),
GetValue(descriptor_number),
@@ -1346,6 +1424,13 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
}
void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
Descriptor desc;
src->Get(src_index, &desc);
Set(index, &desc);
}
void DescriptorArray::Swap(int first, int second) {
fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
FixedArray* content_array = GetContentArray();
@@ -1354,15 +1439,14 @@ void DescriptorArray::Swap(int first, int second) {
}
bool Dictionary::requires_slow_elements() {
bool NumberDictionary::requires_slow_elements() {
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return false;
return 0 !=
(Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask);
}
uint32_t Dictionary::max_number_key() {
uint32_t NumberDictionary::max_number_key() {
ASSERT(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return 0;
@@ -1370,8 +1454,7 @@ uint32_t Dictionary::max_number_key() {
return value >> kRequiresSlowElementsTagSize;
}
void Dictionary::set_requires_slow_elements() {
void NumberDictionary::set_requires_slow_elements() {
set(kMaxNumberKeyIndex,
Smi::FromInt(kRequiresSlowElementsMask),
SKIP_WRITE_BARRIER);
@@ -1384,7 +1467,6 @@ void Dictionary::set_requires_slow_elements() {
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(Dictionary)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(MapCache)
@@ -1403,6 +1485,7 @@ CAST_ACCESSOR(Failure)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(JSGlobalPropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(JSFunction)
@@ -1422,9 +1505,9 @@ CAST_ACCESSOR(Struct)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
template <int prefix_size, int elem_size>
HashTable<prefix_size, elem_size>* HashTable<prefix_size, elem_size>::cast(
Object* obj) {
template <typename Shape, typename Key>
HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
ASSERT(obj->IsHashTable());
return reinterpret_cast<HashTable*>(obj);
}
@@ -2451,15 +2534,15 @@ bool JSObject::HasIndexedInterceptor() {
}
Dictionary* JSObject::property_dictionary() {
StringDictionary* JSObject::property_dictionary() {
ASSERT(!HasFastProperties());
return Dictionary::cast(properties());
return StringDictionary::cast(properties());
}
Dictionary* JSObject::element_dictionary() {
NumberDictionary* JSObject::element_dictionary() {
ASSERT(!HasFastElements());
return Dictionary::cast(elements());
return NumberDictionary::cast(elements());
}
@@ -2623,16 +2706,17 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
}
void Dictionary::SetEntry(int entry,
template<typename Shape, typename Key>
void Dictionary<Shape, Key>::SetEntry(int entry,
Object* key,
Object* value,
PropertyDetails details) {
ASSERT(!key->IsString() || details.index() > 0);
int index = EntryToIndex(entry);
WriteBarrierMode mode = GetWriteBarrierMode();
set(index, key, mode);
set(index+1, value, mode);
fast_set(this, index+2, details.AsSmi());
ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
int index = HashTable<Shape, Key>::EntryToIndex(entry);
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode();
FixedArray::set(index, key, mode);
FixedArray::set(index+1, value, mode);
FixedArray::fast_set(this, index+2, details.AsSmi());
}
@@ -2640,8 +2724,8 @@ void Map::ClearCodeCache() {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
WRITE_FIELD(this, kCodeCacheOffset, Heap::empty_fixed_array());
ASSERT(!Heap::InNewSpace(Heap::raw_unchecked_empty_fixed_array()));
WRITE_FIELD(this, kCodeCacheOffset, Heap::raw_unchecked_empty_fixed_array());
}

1119
deps/v8/src/objects.cc

File diff suppressed because it is too large

687
deps/v8/src/objects.h

File diff suppressed because it is too large

292
deps/v8/src/parser.cc

@@ -361,7 +361,7 @@ class BufferedZoneList {
};
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder {
class RegExpBuilder: public ZoneObject {
public:
RegExpBuilder();
void AddCharacter(uc16 character);
@@ -392,7 +392,10 @@ class RegExpBuilder {
RegExpBuilder::RegExpBuilder()
: pending_empty_(false), characters_(NULL), terms_(), alternatives_()
: pending_empty_(false),
characters_(NULL),
terms_(),
alternatives_()
#ifdef DEBUG
, last_added_(ADD_NONE)
#endif
@ -594,6 +597,44 @@ class RegExpParser {
static const int kMaxCaptures = 1 << 16;
static const uc32 kEndMarker = (1 << 21);
private:
enum SubexpressionType {
INITIAL,
CAPTURE, // All positive values represent captures.
POSITIVE_LOOKAHEAD,
NEGATIVE_LOOKAHEAD,
GROUPING
};
class RegExpParserState : public ZoneObject {
public:
RegExpParserState(RegExpParserState* previous_state,
SubexpressionType group_type,
int disjunction_capture_index)
: previous_state_(previous_state),
builder_(new RegExpBuilder()),
group_type_(group_type),
disjunction_capture_index_(disjunction_capture_index) {}
// Parser state of containing expression, if any.
RegExpParserState* previous_state() { return previous_state_; }
bool IsSubexpression() { return previous_state_ != NULL; }
// RegExpBuilder building this regexp's AST.
RegExpBuilder* builder() { return builder_; }
// Type of regexp being parsed (parenthesized group or entire regexp).
SubexpressionType group_type() { return group_type_; }
// Index in captures array of first capture in this sub-expression, if any.
// Also the capture index of this sub-expression itself, if group_type
// is CAPTURE.
int capture_index() { return disjunction_capture_index_; }
private:
// Linked list implementation of stack of states.
RegExpParserState* previous_state_;
// Builder for the stored disjunction.
RegExpBuilder* builder_;
// Stored disjunction type (capture, look-ahead or grouping), if any.
SubexpressionType group_type_;
// Stored disjunction's capture index (if any).
int disjunction_capture_index_;
};
uc32 current() { return current_; }
bool has_more() { return has_more_; }
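The capture_index bookkeeping in this state stack is what numbers capture groups by the textual position of their '(', even when groups nest; observable from JS as:
var m = /((a)(b(c)?))/.exec("ab");
// m[1] === "ab"        outermost group, first '('
// m[2] === "a"
// m[3] === "b"
// m[4] === undefined   -- (c)? never participated in the match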
@ -601,7 +642,6 @@ class RegExpParser {
uc32 Next();
FlatStringReader* in() { return in_; }
void ScanForCaptures();
bool CaptureAvailable(int index);
uc32 current_;
bool has_more_;
bool multiline_;
@@ -1536,10 +1576,10 @@ VariableProxy* AstBuildingParser::Declare(Handle<String> name,
// to the calling function context.
if (top_scope_->is_function_scope()) {
// Declare the variable in the function scope.
var = top_scope_->LookupLocal(name);
var = top_scope_->LocalLookup(name);
if (var == NULL) {
// Declare the name.
var = top_scope_->Declare(name, mode);
var = top_scope_->DeclareLocal(name, mode);
} else {
// The name was declared before; check for conflicting
// re-declarations. If the previous declaration was a const or the
@@ -2005,7 +2045,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
// 'continue' Identifier? ';'
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label(static_cast<String**>(NULL));
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
if (!scanner_.has_line_terminator_before_next() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
@@ -3426,7 +3466,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
while (!done) {
Handle<String> param_name = ParseIdentifier(CHECK_OK);
if (!is_pre_parsing_) {
top_scope_->AddParameter(top_scope_->Declare(param_name,
top_scope_->AddParameter(top_scope_->DeclareLocal(param_name,
Variable::VAR));
num_parameters++;
}
@@ -3808,9 +3848,7 @@ RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
// Disjunction
RegExpTree* RegExpParser::ParsePattern() {
RegExpTree* result = ParseDisjunction(CHECK_FAILED);
if (has_more()) {
ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
}
ASSERT(!has_more());
// If the result of parsing is a literal string atom, and it has the
// same length as the input, then the atom is identical to the input.
if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
@@ -3820,14 +3858,6 @@ RegExpTree* RegExpParser::ParsePattern() {
}
bool RegExpParser::CaptureAvailable(int index) {
if (captures_ == NULL) return false;
if (index >= captures_->length()) return false;
RegExpCapture* capture = captures_->at(index);
return capture != NULL && capture->available() == CAPTURE_AVAILABLE;
}
// Disjunction ::
// Alternative
// Alternative | Disjunction
@@ -3839,24 +3869,60 @@ bool RegExpParser::CaptureAvailable(int index) {
// Atom
// Atom Quantifier
RegExpTree* RegExpParser::ParseDisjunction() {
RegExpBuilder builder;
int capture_start_index = captures_started();
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(NULL, INITIAL, 0);
RegExpParserState* stored_state = &initial_state;
// Cache the builder in a local variable for quick access.
RegExpBuilder* builder = initial_state.builder();
while (true) {
switch (current()) {
case kEndMarker:
case ')':
return builder.ToRegExp();
case '|': {
if (stored_state->IsSubexpression()) {
// Inside a parenthesized group when hitting end of input.
ReportError(CStrVector("Unterminated group") CHECK_FAILED);
}
ASSERT_EQ(INITIAL, stored_state->group_type());
// Parsing completed successfully.
return builder->ToRegExp();
case ')': {
if (!stored_state->IsSubexpression()) {
ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
}
ASSERT_NE(INITIAL, stored_state->group_type());
Advance();
builder.NewAlternative();
int capture_new_alt_start_index = captures_started();
for (int i = capture_start_index; i < capture_new_alt_start_index; i++) {
RegExpCapture* capture = captures_->at(i);
if (capture->available() == CAPTURE_AVAILABLE) {
capture->set_available(CAPTURE_UNREACHABLE);
// End disjunction parsing and convert builder content to new single
// regexp atom.
RegExpTree* body = builder->ToRegExp();
int end_capture_index = captures_started();
int capture_index = stored_state->capture_index();
SubexpressionType type = stored_state->group_type();
// Restore previous state.
stored_state = stored_state->previous_state();
builder = stored_state->builder();
// Build result of subexpression.
if (type == CAPTURE) {
RegExpCapture* capture = new RegExpCapture(body, capture_index);
captures_->at(capture_index - 1) = capture;
body = capture;
} else if (type != GROUPING) {
ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
bool is_positive = (type == POSITIVE_LOOKAHEAD);
body = new RegExpLookahead(body,
is_positive,
end_capture_index - capture_index,
capture_index);
}
builder->AddAtom(body);
break;
}
capture_start_index = capture_new_alt_start_index;
case '|': {
Advance();
builder->NewAlternative();
continue;
}
case '*':
@@ -3866,10 +3932,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '^': {
Advance();
if (multiline_) {
builder.AddAssertion(
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::START_OF_LINE));
} else {
builder.AddAssertion(
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::START_OF_INPUT));
set_contains_anchor();
}
@@ -3880,7 +3946,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::Type type =
multiline_ ? RegExpAssertion::END_OF_LINE :
RegExpAssertion::END_OF_INPUT;
builder.AddAssertion(new RegExpAssertion(type));
builder->AddAssertion(new RegExpAssertion(type));
continue;
}
case '.': {
@@ -3889,17 +3955,47 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
CharacterRange::AddClassEscape('.', ranges);
RegExpTree* atom = new RegExpCharacterClass(ranges, false);
builder.AddAtom(atom);
builder->AddAtom(atom);
break;
}
case '(': {
RegExpTree* atom = ParseGroup(CHECK_FAILED);
builder.AddAtom(atom);
SubexpressionType type = CAPTURE;
Advance();
if (current() == '?') {
switch (Next()) {
case ':':
type = GROUPING;
break;
case '=':
type = POSITIVE_LOOKAHEAD;
break;
case '!':
type = NEGATIVE_LOOKAHEAD;
break;
default:
ReportError(CStrVector("Invalid group") CHECK_FAILED);
break;
}
Advance(2);
} else {
if (captures_ == NULL) {
captures_ = new ZoneList<RegExpCapture*>(2);
}
if (captures_started() >= kMaxCaptures) {
ReportError(CStrVector("Too many captures") CHECK_FAILED);
}
captures_->Add(NULL);
}
// Store current state and begin new disjunction parsing.
stored_state = new RegExpParserState(stored_state,
type,
captures_started());
builder = stored_state->builder();
break;
}
case '[': {
RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
builder.AddAtom(atom);
builder->AddAtom(atom);
break;
}
// Atom ::
@@ -3910,12 +4006,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED);
case 'b':
Advance(2);
builder.AddAssertion(
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::BOUNDARY));
continue;
case 'B':
Advance(2);
builder.AddAssertion(
builder->AddAssertion(
new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
@@ -3929,27 +4025,29 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
CharacterRange::AddClassEscape(c, ranges);
RegExpTree* atom = new RegExpCharacterClass(ranges, false);
builder.AddAtom(atom);
goto has_read_atom; // Avoid setting has_character_escapes_.
builder->AddAtom(atom);
break;
}
case '1': case '2': case '3': case '4': case '5': case '6':
case '7': case '8': case '9': {
int index = 0;
if (ParseBackReferenceIndex(&index)) {
if (!CaptureAvailable(index - 1)) {
// Prepare to ignore a following quantifier
builder.AddEmpty();
goto has_read_atom;
RegExpCapture* capture = NULL;
if (captures_ != NULL && index <= captures_->length()) {
capture = captures_->at(index - 1);
}
if (capture == NULL) {
builder->AddEmpty();
break;
}
RegExpCapture* capture = captures_->at(index - 1);
RegExpTree* atom = new RegExpBackReference(capture);
builder.AddAtom(atom);
goto has_read_atom; // Avoid setting has_character_escapes_.
builder->AddAtom(atom);
break;
}
uc32 first_digit = Next();
if (first_digit == '8' || first_digit == '9') {
// Treat as identity escape
builder.AddCharacter(first_digit);
builder->AddCharacter(first_digit);
Advance(2);
break;
}
@@ -3958,44 +4056,44 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '0': {
Advance();
uc32 octal = ParseOctalLiteral();
builder.AddCharacter(octal);
builder->AddCharacter(octal);
break;
}
// ControlEscape :: one of
// f n r t v
case 'f':
Advance(2);
builder.AddCharacter('\f');
builder->AddCharacter('\f');
break;
case 'n':
Advance(2);
builder.AddCharacter('\n');
builder->AddCharacter('\n');
break;
case 'r':
Advance(2);
builder.AddCharacter('\r');
builder->AddCharacter('\r');
break;
case 't':
Advance(2);
builder.AddCharacter('\t');
builder->AddCharacter('\t');
break;
case 'v':
Advance(2);
builder.AddCharacter('\v');
builder->AddCharacter('\v');
break;
case 'c': {
Advance(2);
uc32 control = ParseControlLetterEscape();
builder.AddCharacter(control);
builder->AddCharacter(control);
break;
}
case 'x': {
Advance(2);
uc32 value;
if (ParseHexEscape(2, &value)) {
builder.AddCharacter(value);
builder->AddCharacter(value);
} else {
builder.AddCharacter('x');
builder->AddCharacter('x');
}
break;
}
@@ -4003,15 +4101,15 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
uc32 value;
if (ParseHexEscape(4, &value)) {
builder.AddCharacter(value);
builder->AddCharacter(value);
} else {
builder.AddCharacter('u');
builder->AddCharacter('u');
}
break;
}
default:
// Identity escape.
builder.AddCharacter(Next());
builder->AddCharacter(Next());
Advance(2);
break;
}
@@ -4024,12 +4122,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// fallthrough
}
default:
builder.AddCharacter(current());
builder->AddCharacter(current());
Advance();
break;
} // end switch(current())
has_read_atom:
int min;
int max;
switch (current()) {
@@ -4071,7 +4168,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
is_greedy = false;
Advance();
}
builder.AddQuantifierToAtom(min, max, is_greedy);
builder->AddQuantifierToAtom(min, max, is_greedy);
}
}
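The inlined group handling above covers all four SubexpressionType cases, and a backreference whose capture does not exist simply matches the empty string; a sketch of the resulting semantics:
/a(?:b)c/.test("abc");   // true: (?:...) groups without capturing
/a(?=b)/.exec("ab")[0];  // "a": lookahead matches but consumes nothing
/a(?!b)/.test("ac");     // true: negative lookahead
/\1(a)/.exec("a");       // ["a", "a"]: \1 before its group matches empty
try { new RegExp("(a"); } catch (e) { /* "Unterminated group" */ }
try { new RegExp("a)"); } catch (e) { /* "Unmatched ')'" */ }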
@@ -4382,73 +4479,6 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
}
RegExpTree* RegExpParser::ParseGroup() {
ASSERT_EQ(current(), '(');
char type = '(';
Advance();
if (current() == '?') {
switch (Next()) {
case ':': case '=': case '!':
type = Next();
Advance(2);
break;
default:
ReportError(CStrVector("Invalid group") CHECK_FAILED);
break;
}
} else {
if (captures_ == NULL) {
captures_ = new ZoneList<RegExpCapture*>(2);
}
if (captures_started() >= kMaxCaptures) {
ReportError(CStrVector("Too many captures") CHECK_FAILED);
}
captures_->Add(NULL);
}
int capture_index = captures_started();
RegExpTree* body = ParseDisjunction(CHECK_FAILED);
if (current() != ')') {
ReportError(CStrVector("Unterminated group") CHECK_FAILED);
}
Advance();
int end_capture_index = captures_started();
if (type == '!') {
// Captures inside a negative lookahead are never available outside it.
for (int i = capture_index; i < end_capture_index; i++) {
RegExpCapture* capture = captures_->at(i);
ASSERT(capture != NULL);
capture->set_available(CAPTURE_PERMANENTLY_UNREACHABLE);
}
} else {
// Captures temporarily unavailable because they are in different
// alternatives are all available after the disjunction.
for (int i = capture_index; i < end_capture_index; i++) {
RegExpCapture* capture = captures_->at(i);
ASSERT(capture != NULL);
if (capture->available() == CAPTURE_UNREACHABLE) {
capture->set_available(CAPTURE_AVAILABLE);
}
}
}
if (type == '(') {
RegExpCapture* capture = new RegExpCapture(body, capture_index);
captures_->at(capture_index - 1) = capture;
return capture;
} else if (type == ':') {
return body;
} else {
ASSERT(type == '=' || type == '!');
bool is_positive = (type == '=');
return new RegExpLookahead(body,
is_positive,
end_capture_index - capture_index,
capture_index);
}
}
CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
ASSERT_EQ(0, *char_class);
uc32 first = current();

1
deps/v8/src/platform-freebsd.cc

@@ -561,6 +561,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.sp = mcontext.mc_esp;
sample.fp = mcontext.mc_ebp;
#endif
active_sampler_->SampleStack(&sample);
}
// We always sample the VM state.

1
deps/v8/src/platform-linux.cc

@@ -639,6 +639,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.fp = mcontext.arm_fp;
#endif
#endif
active_sampler_->SampleStack(&sample);
}
// We always sample the VM state.

153
deps/v8/src/platform-macos.cc

@@ -38,6 +38,7 @@
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <sys/time.h>
@@ -475,63 +476,94 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
static Sampler* active_sampler_ = NULL;
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
class Sampler::PlatformData : public Malloced {
public:
explicit PlatformData(Sampler* sampler)
: sampler_(sampler),
task_self_(mach_task_self()),
profiled_thread_(0),
sampler_thread_(0) {
}
Sampler* sampler_;
// Note: for profiled_thread_ Mach primitives are used instead of PThread's
// because the latter doesn't provide the required thread manipulation primitives.
// For details, consult "Mac OS X Internals" book, Section 7.3.
mach_port_t task_self_;
thread_act_t profiled_thread_;
pthread_t sampler_thread_;
// Sampler thread handler.
void Runner() {
// Loop until the sampler is disengaged.
while (sampler_->IsActive()) {
TickSample sample;
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
&& KERN_SUCCESS == thread_suspend(profiled_thread_)) {
#if V8_HOST_ARCH_X64
thread_state_flavor_t flavor = x86_THREAD_STATE64;
x86_thread_state64_t state;
mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
#elif V8_HOST_ARCH_IA32
thread_state_flavor_t flavor = i386_THREAD_STATE;
i386_thread_state_t state;
mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
#else
#error Unsupported Mac OS X host architecture.
#endif // V8_TARGET_ARCH_IA32
if (thread_get_state(profiled_thread_,
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
#if V8_HOST_ARCH_X64
UNIMPLEMENTED();
USE(mcontext);
sample.pc = 0;
sample.sp = 0;
sample.fp = 0;
#elif V8_HOST_ARCH_IA32
#if __DARWIN_UNIX03
sample.pc = mcontext->__ss.__eip;
sample.sp = mcontext->__ss.__esp;
sample.fp = mcontext->__ss.__ebp;
sample.pc = state.__eip;
sample.sp = state.__esp;
sample.fp = state.__ebp;
#else // !__DARWIN_UNIX03
sample.pc = mcontext->ss.eip;
sample.sp = mcontext->ss.esp;
sample.fp = mcontext->ss.ebp;
sample.pc = state.eip;
sample.sp = state.esp;
sample.fp = state.ebp;
#endif // __DARWIN_UNIX03
#else
#error Unsupported Mac OS X host architecture.
#endif // V8_TARGET_ARCH_IA32
#endif // V8_HOST_ARCH_IA32
sampler_->SampleStack(&sample);
}
thread_resume(profiled_thread_);
}
// We always sample the VM state.
sample.state = Logger::state();
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
active_sampler_->Tick(&sample);
// Wait until next sampling.
usleep(sampler_->interval_ * 1000);
}
}
};
class Sampler::PlatformData : public Malloced {
public:
PlatformData() {
signal_handler_installed_ = false;
// Entry point for sampler thread.
static void* SamplerEntry(void* arg) {
Sampler::PlatformData* data =
reinterpret_cast<Sampler::PlatformData*>(arg);
data->Runner();
return 0;
}
bool signal_handler_installed_;
struct sigaction old_signal_handler_;
struct itimerval old_timer_value_;
};
Sampler::Sampler(int interval, bool profiling)
: interval_(interval), profiling_(profiling), active_(false) {
data_ = new PlatformData();
data_ = new PlatformData(this);
}
@@ -541,43 +573,40 @@ Sampler::~Sampler() {
void Sampler::Start() {
// There can only be one active sampler at the time on POSIX
// platforms.
if (active_sampler_ != NULL) return;
// Request profiling signals.
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
data_->signal_handler_installed_ = true;
// Set the itimer to generate a tick for each interval.
itimerval itimer;
itimer.it_interval.tv_sec = interval_ / 1000;
itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
// Set this sampler as the active sampler.
active_sampler_ = this;
// If we are profiling, we need to be able to access the calling
// thread.
if (IsProfiling()) {
data_->profiled_thread_ = mach_thread_self();
}
// Create sampler thread with high priority.
// According to POSIX spec, when SCHED_FIFO policy is used, a thread
// runs until it exits or blocks.
pthread_attr_t sched_attr;
sched_param fifo_param;
pthread_attr_init(&sched_attr);
pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED);
pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO);
fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
pthread_attr_setschedparam(&sched_attr, &fifo_param);
active_ = true;
pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
}
void Sampler::Stop() {
// Restore old signal handler
if (data_->signal_handler_installed_) {
setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
sigaction(SIGPROF, &data_->old_signal_handler_, 0);
data_->signal_handler_installed_ = false;
}
// This sampler is no longer the active sampler.
active_sampler_ = NULL;
// Setting active to false triggers termination of the sampler
// thread.
active_ = false;
// Wait for sampler thread to terminate.
pthread_join(data_->sampler_thread_, NULL);
// Deallocate Mach port for thread.
if (IsProfiling()) {
mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
}
}
#endif // ENABLE_LOGGING_AND_PROFILING

16
deps/v8/src/platform-win32.cc

@@ -1776,12 +1776,10 @@ class Sampler::PlatformData : public Malloced {
TickSample sample;
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()) {
// Pause the profiled thread and get its context.
SuspendThread(profiled_thread_);
if (sampler_->IsProfiling()
&& SuspendThread(profiled_thread_) != (DWORD)-1) {
context.ContextFlags = CONTEXT_FULL;
GetThreadContext(profiled_thread_, &context);
// Invoke tick handler with program counter and stack pointer.
if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
UNIMPLEMENTED();
sample.pc = context.Rip;
@@ -1792,16 +1790,16 @@ class Sampler::PlatformData : public Malloced {
sample.sp = context.Esp;
sample.fp = context.Ebp;
#endif
sampler_->SampleStack(&sample);
}
ResumeThread(profiled_thread_);
}
// We always sample the VM state.
sample.state = Logger::state();
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
if (sampler_->IsProfiling()) {
ResumeThread(profiled_thread_);
}
// Wait until next sampling.
Sleep(sampler_->interval_);
}

7
deps/v8/src/platform.h

@@ -510,6 +510,9 @@ class Sampler {
explicit Sampler(int interval, bool profiling);
virtual ~Sampler();
// Performs stack sampling.
virtual void SampleStack(TickSample* sample) = 0;
// This method is called for each sampling period with the current
// program counter.
virtual void Tick(TickSample* sample) = 0;
@@ -527,8 +530,8 @@ class Sampler {
class PlatformData;
private:
int interval_;
bool profiling_;
const int interval_;
const bool profiling_;
bool active_;
PlatformData* data_; // Platform specific data.
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);

14
deps/v8/src/property.cc

@@ -31,20 +31,6 @@ namespace v8 {
namespace internal {
void DescriptorWriter::Write(Descriptor* desc) {
ASSERT(desc->key_->IsSymbol());
descriptors_->Set(pos_, desc);
advance();
}
void DescriptorWriter::WriteFrom(DescriptorReader* reader) {
Descriptor desc;
reader->Get(&desc);
Write(&desc);
}
#ifdef DEBUG
void LookupResult::Print() {
if (!IsValid()) {

101
deps/v8/src/property.h

@@ -95,8 +95,6 @@ class Descriptor BASE_EMBEDDED {
value_(value),
details_(attributes, type, index) { }
friend class DescriptorWriter;
friend class DescriptorReader;
friend class DescriptorArray;
};
@@ -230,6 +228,7 @@ class LookupResult BASE_EMBEDDED {
bool IsReadOnly() { return details_.IsReadOnly(); }
bool IsDontDelete() { return details_.IsDontDelete(); }
bool IsDontEnum() { return details_.IsDontEnum(); }
bool IsDeleted() { return details_.IsDeleted(); }
bool IsValid() { return lookup_type_ != NOT_FOUND; }
bool IsNotFound() { return lookup_type_ == NOT_FOUND; }
@@ -256,8 +255,14 @@ class LookupResult BASE_EMBEDDED {
switch (type()) {
case FIELD:
return holder()->FastPropertyAt(GetFieldIndex());
case NORMAL:
return holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
if (holder()->IsGlobalObject()) {
value = JSGlobalPropertyCell::cast(value)->value();
}
return value;
}
case CONSTANT_FUNCTION:
return GetConstantFunction();
default:
@@ -306,7 +311,7 @@ class LookupResult BASE_EMBEDDED {
}
// In the dictionary case, the data is held in the value field.
ASSERT(lookup_type_ == DICTIONARY_TYPE);
return holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
return holder()->GetNormalizedProperty(this);
}
private:
@@ -317,92 +322,6 @@ class LookupResult BASE_EMBEDDED {
};
// The DescriptorStream is an abstraction for iterating over a map's
// instance descriptors.
class DescriptorStream BASE_EMBEDDED {
public:
explicit DescriptorStream(DescriptorArray* descriptors, int pos) {
descriptors_ = descriptors;
pos_ = pos;
limit_ = descriptors_->number_of_descriptors();
}
// Tells whether we have reached the end of the stream.
bool eos() { return pos_ >= limit_; }
int next_position() { return pos_ + 1; }
void advance() { pos_ = next_position(); }
protected:
DescriptorArray* descriptors_;
int pos_; // Current position.
int limit_; // Limit for position.
};
class DescriptorReader: public DescriptorStream {
public:
explicit DescriptorReader(DescriptorArray* descriptors, int pos = 0)
: DescriptorStream(descriptors, pos) {}
String* GetKey() { return descriptors_->GetKey(pos_); }
Object* GetValue() { return descriptors_->GetValue(pos_); }
PropertyDetails GetDetails() {
return PropertyDetails(descriptors_->GetDetails(pos_));
}
int GetFieldIndex() { return Descriptor::IndexFromValue(GetValue()); }
bool IsDontEnum() { return GetDetails().IsDontEnum(); }
PropertyType type() { return GetDetails().type(); }
// Tells whether the type is a transition.
bool IsTransition() {
PropertyType t = type();
ASSERT(t != INTERCEPTOR);
return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
}
bool IsNullDescriptor() {
return type() == NULL_DESCRIPTOR;
}
bool IsProperty() {
return type() < FIRST_PHANTOM_PROPERTY_TYPE;
}
JSFunction* GetConstantFunction() { return JSFunction::cast(GetValue()); }
AccessorDescriptor* GetCallbacks() {
ASSERT(type() == CALLBACKS);
Proxy* p = Proxy::cast(GetCallbacksObject());
return reinterpret_cast<AccessorDescriptor*>(p->proxy());
}
Object* GetCallbacksObject() {
ASSERT(type() == CALLBACKS);
return GetValue();
}
bool Equals(String* name) { return name->Equals(GetKey()); }
void Get(Descriptor* desc) {
descriptors_->Get(pos_, desc);
}
};
class DescriptorWriter: public DescriptorStream {
public:
explicit DescriptorWriter(DescriptorArray* descriptors)
: DescriptorStream(descriptors, 0) {}
// Append a descriptor to this stream.
void Write(Descriptor* desc);
// Read a descriptor from the reader and append it to this stream.
void WriteFrom(DescriptorReader* reader);
};
} } // namespace v8::internal
#endif // V8_PROPERTY_H_

2
deps/v8/src/regexp-delay.js

@@ -103,7 +103,7 @@ function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
function RegExpConstructor(pattern, flags) {
if (%IsConstructCall()) {
if (%_IsConstructCall()) {
DoConstructRegExp(this, pattern, flags, true);
} else {
// RegExp : Called as function; see ECMA-262, section 15.10.3.1.

249
deps/v8/src/runtime.cc

@@ -168,7 +168,7 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
}
}
} else {
Dictionary* element_dictionary = copy->element_dictionary();
NumberDictionary* element_dictionary = copy->element_dictionary();
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
@@ -413,48 +413,6 @@ static Object* Runtime_ClassOf(Arguments args) {
}
static Object* Runtime_HasStringClass(Arguments args) {
return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::String_symbol()));
}
static Object* Runtime_HasDateClass(Arguments args) {
return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Date_symbol()));
}
static Object* Runtime_HasArrayClass(Arguments args) {
return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Array_symbol()));
}
static Object* Runtime_HasFunctionClass(Arguments args) {
return Heap::ToBoolean(
args[0]->HasSpecificClassOf(Heap::function_class_symbol()));
}
static Object* Runtime_HasNumberClass(Arguments args) {
return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Number_symbol()));
}
static Object* Runtime_HasBooleanClass(Arguments args) {
return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Boolean_symbol()));
}
static Object* Runtime_HasArgumentsClass(Arguments args) {
return Heap::ToBoolean(
args[0]->HasSpecificClassOf(Heap::Arguments_symbol()));
}
static Object* Runtime_HasRegExpClass(Arguments args) {
return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::RegExp_symbol()));
}
static Object* Runtime_IsInPrototypeChain(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -618,9 +576,6 @@ static Object* Runtime_DeclareGlobals(Arguments args) {
// property as read-only, so we don't either.
PropertyAttributes base = is_eval ? NONE : DONT_DELETE;
// Only optimize the object if we intend to add more than 5 properties.
OptimizedObjectForAddingMultipleProperties ba(global, pairs->length()/2 > 5);
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
for (int i = 0; i < length; i += 2) {
@@ -814,18 +769,24 @@ static Object* Runtime_InitializeVarGlobal(Arguments args) {
PropertyAttributes attributes = DONT_DELETE;
// Lookup the property locally in the global object. If it isn't
// there, we add the property and take special precautions to always
// add it as a local property even in case of callbacks in the
// prototype chain (this rules out using SetProperty).
// We have IgnoreAttributesAndSetLocalProperty for this.
// there, there is a property with this name in the prototype chain.
// We follow Safari and Firefox behavior and only set the property
// locally if there is an explicit initialization value that we have
// to assign to the property. When adding the property we take
// special precautions to always add it as a local property even in
// case of callbacks in the prototype chain (this rules out using
// SetProperty). We have IgnoreAttributesAndSetLocalProperty for
// this.
LookupResult lookup;
global->LocalLookup(*name, &lookup);
if (!lookup.IsProperty()) {
Object* value = (assign) ? args[1] : Heap::undefined_value();
if (assign) {
return global->IgnoreAttributesAndSetLocalProperty(*name,
value,
args[1],
attributes);
}
return Heap::undefined_value();
}
// Determine if this is a redeclaration of something read-only.
if (lookup.IsReadOnly()) {
@@ -932,10 +893,8 @@ static Object* Runtime_InitializeConstGlobal(Arguments args) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
Dictionary* dictionary = global->property_dictionary();
int entry = lookup.GetDictionaryEntry();
if (dictionary->ValueAt(entry)->IsTheHole()) {
dictionary->ValueAtPut(entry, *value);
if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
global->SetNormalizedProperty(&lookup, *value);
}
} else {
// Ignore re-initialization of constants that have already been
@@ -1025,10 +984,8 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
properties->set(index, *value);
}
} else if (type == NORMAL) {
Dictionary* dictionary = context_ext->property_dictionary();
int entry = lookup.GetDictionaryEntry();
if (dictionary->ValueAt(entry)->IsTheHole()) {
dictionary->ValueAtPut(entry, *value);
if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
context_ext->SetNormalizedProperty(&lookup, *value);
}
} else {
// We should not reach here. Any real, named property should be
@@ -1059,16 +1016,16 @@ static Object* Runtime_RegExpExec(Arguments args) {
ASSERT(args.length() == 4);
CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_CHECKED(String, subject, 1);
// Due to the way the JS files are constructed this must be less than the
// Due to the way the JS calls are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_CHECKED(Smi, index, args[2]);
CONVERT_SMI_CHECKED(index, args[2]);
CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index->value() >= 0);
RUNTIME_ASSERT(index->value() <= subject->length());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index->value(),
index,
last_match_info);
if (result.is_null()) return Failure::Exception();
return *result;
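The JS wrapper in regexp-delay.js is expected to clamp lastIndex before calling in, so the RUNTIME_ASSERTs above are a backstop rather than the normal failure path; the behavior visible from script:
var re = /a/g;
re.lastIndex = 100;   // beyond subject.length
re.exec("abc");       // null: the caller bails out before reaching the runtime
re.lastIndex === 0;   // true: a failed global match resets lastIndex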
@@ -1156,6 +1113,21 @@ static Object* Runtime_FunctionGetScriptSourcePosition(Arguments args) {
}
static Object* Runtime_FunctionGetPositionForOffset(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, fun, args[0]);
CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
Code* code = fun->code();
RUNTIME_ASSERT(0 <= offset && offset < code->Size());
Address pc = code->address() + offset;
return Smi::FromInt(fun->code()->SourcePosition(pc));
}
static Object* Runtime_FunctionSetInstanceClassName(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -2626,23 +2598,25 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value;
}
// Lookup cache miss. Perform lookup and update the cache if
// appropriate.
// Lookup cache miss. Perform lookup and update the cache if appropriate.
LookupResult result;
receiver->LocalLookup(key, &result);
if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
int offset = result.GetFieldIndex();
KeyedLookupCache::Update(receiver_map, key, offset);
Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value;
return receiver->FastPropertyAt(offset);
}
} else {
// Attempt dictionary lookup.
Dictionary* dictionary = receiver->property_dictionary();
int entry = dictionary->FindStringEntry(key);
if ((entry != DescriptorArray::kNotFound) &&
StringDictionary* dictionary = receiver->property_dictionary();
int entry = dictionary->FindEntry(key);
if ((entry != StringDictionary::kNotFound) &&
(dictionary->DetailsAt(entry).type() == NORMAL)) {
return dictionary->ValueAt(entry);
Object* value = dictionary->ValueAt(entry);
if (!receiver->IsGlobalObject()) return value;
value = JSGlobalPropertyCell::cast(value)->value();
if (!value->IsTheHole()) return value;
// If value is the hole do the general lookup.
}
}
}
@@ -4179,16 +4153,21 @@ static Object* Runtime_Math_pow(Arguments args) {
}
CONVERT_DOUBLE_CHECKED(y, args[1]);
if (!isinf(x)) {
if (y == 0.5) {
// It's not uncommon to use Math.pow(x, 0.5) to compute the square
// root of a number. To speed up such computations, we explicitly
// check for this case and use the sqrt() function which is faster
// than pow().
// It's not uncommon to use Math.pow(x, 0.5) to compute the
// square root of a number. To speed up such computations, we
// explicitly check for this case and use the sqrt() function
// which is faster than pow().
return Heap::AllocateHeapNumber(sqrt(x));
} else if (y == -0.5) {
// Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
return Heap::AllocateHeapNumber(1.0 / sqrt(x));
} else if (y == 0) {
}
}
if (y == 0) {
return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return Heap::nan_value();
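The !isinf(x) guard above matters because the sqrt() shortcut and pow() disagree for infinite bases: under C99/IEEE 754 semantics pow(-inf, 0.5) is +inf, while sqrt(-inf) is NaN. A small standalone check of that corner case:

#include <cmath>
#include <cstdio>

int main() {
  // pow(x, 0.5) and sqrt(x) agree for finite x >= 0, and both yield NaN
  // for finite x < 0, but they diverge at x = -infinity.
  double neg_inf = -INFINITY;
  printf("pow(-inf, 0.5) = %f\n", pow(neg_inf, 0.5));  // inf
  printf("sqrt(-inf)     = %f\n", sqrt(neg_inf));      // nan
  return 0;
}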
@ -4337,7 +4316,7 @@ static Object* Runtime_NewObject(Arguments args) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into constructors if step into is active.
if (Debug::StepInActive()) {
Debug::HandleStepIn(function, 0, true);
Debug::HandleStepIn(function, Handle<Object>::null(), 0, true);
}
#endif
@ -4522,17 +4501,25 @@ static Object* Runtime_LookupContext(Arguments args) {
// compiler to do the right thing.
//
// TODO(1236026): This is a non-portable hack that should be removed.
// TODO(x64): Definitely!
typedef uint64_t ObjectPair;
#ifdef V8_HOST_ARCH_64_BIT
// Tested with GCC, not with MSVC.
struct ObjectPair {
Object* x;
Object* y;
};
static inline ObjectPair MakePair(Object* x, Object* y) {
#if V8_HOST_ARCH_64_BIT
UNIMPLEMENTED();
return 0;
ObjectPair result = {x, y};
return result;  // Pointers x and y are returned in rax and rdx per the AMD64 ABI.
}
#else
typedef uint64_t ObjectPair;
static inline ObjectPair MakePair(Object* x, Object* y) {
return reinterpret_cast<uint32_t>(x) |
(reinterpret_cast<ObjectPair>(y) << 32);
#endif
}
#endif
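On 32-bit hosts MakePair packs the two result pointers into the low and high halves of a uint64_t; on 64-bit hosts the struct comes back in rax and rdx as the comment notes. A standalone sketch of the 32-bit round trip, where the First/Second unpack helpers are hypothetical and only illustrate how a caller splits the pair:

#include <cstdint>
#include <cstdio>

typedef uint64_t Pair;  // stands in for the 32-bit ObjectPair

// Pack two 32-bit values into the low and high halves of a uint64_t,
// the same shape as the 32-bit MakePair above.
Pair MakePair32(uint32_t x, uint32_t y) {
  return static_cast<uint64_t>(x) | (static_cast<uint64_t>(y) << 32);
}

// Hypothetical unpack helpers for the receiving side.
uint32_t First(Pair p)  { return static_cast<uint32_t>(p & 0xffffffffu); }
uint32_t Second(Pair p) { return static_cast<uint32_t>(p >> 32); }

int main() {
  Pair p = MakePair32(0x1234u, 0x5678u);
  printf("%x %x\n", First(p), Second(p));  // prints: 1234 5678
  return 0;
}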
static inline Object* Unhole(Object* x, PropertyAttributes attributes) {
@ -5152,8 +5139,8 @@ class ArrayConcatVisitor {
storage_->set(index, *elm);
} else {
Handle<Dictionary> dict = Handle<Dictionary>::cast(storage_);
Handle<Dictionary> result =
Handle<NumberDictionary> dict = Handle<NumberDictionary>::cast(storage_);
Handle<NumberDictionary> result =
Factory::DictionaryAtNumberPut(dict, index, elm);
if (!result.is_identical_to(dict))
storage_ = result;
@ -5201,7 +5188,7 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
}
} else {
Handle<Dictionary> dict(receiver->element_dictionary());
Handle<NumberDictionary> dict(receiver->element_dictionary());
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
Handle<Object> k(dict->KeyAt(j));
@ -5355,7 +5342,7 @@ static Object* Runtime_ArrayConcat(Arguments args) {
uint32_t at_least_space_for = estimate_nof_elements +
(estimate_nof_elements >> 2);
storage = Handle<FixedArray>::cast(
Factory::NewDictionary(at_least_space_for));
Factory::NewNumberDictionary(at_least_space_for));
}
Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@ -5418,7 +5405,7 @@ static Object* Runtime_EstimateNumberOfElements(Arguments args) {
CONVERT_CHECKED(JSArray, array, args[0]);
HeapObject* elements = array->elements();
if (elements->IsDictionary()) {
return Smi::FromInt(Dictionary::cast(elements)->NumberOfElements());
return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
} else {
return array->length();
}
@ -5560,15 +5547,12 @@ static Object* DebugLookupResultValue(Object* receiver, String* name,
bool* caught_exception) {
Object* value;
switch (result->type()) {
case NORMAL: {
Dictionary* dict =
JSObject::cast(result->holder())->property_dictionary();
value = dict->ValueAt(result->GetDictionaryEntry());
case NORMAL:
value = result->holder()->GetNormalizedProperty(result);
if (value->IsTheHole()) {
return Heap::undefined_value();
}
return value;
}
case FIELD:
value =
JSObject::cast(
@ -7408,6 +7392,81 @@ static Object* Runtime_GetScript(Arguments args) {
}
// Determines whether the given stack frame should be displayed in
// a stack trace. The caller is the error constructor that asked
// for the stack trace to be collected. The first construct call to the
// caller function that is encountered is skipped. The seen_caller
// in/out parameter is used to remember whether the caller has been
// seen yet.
static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
bool* seen_caller) {
// Only display JS frames.
if (!raw_frame->is_java_script())
return false;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
Object* raw_fun = frame->function();
// Not sure when this can happen but skip it just in case.
if (!raw_fun->IsJSFunction())
return false;
if ((raw_fun == caller) && !(*seen_caller) && frame->IsConstructor()) {
*seen_caller = true;
return false;
}
// Skip the most obvious builtin calls. Some builtin calls (such as
// Number.ADD which is invoked using 'call') are very difficult to
// recognize so we're leaving them in for now.
return !frame->receiver()->IsJSBuiltinsObject();
}
// Collect the raw data for a stack trace. Returns an array of
// three-element segments, each containing a receiver, function, and
// native code offset.
static Object* Runtime_CollectStackTrace(Arguments args) {
ASSERT_EQ(args.length(), 2);
Object* caller = args[0];
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
HandleScope scope;
int initial_size = limit < 10 ? limit : 10;
Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
StackFrameIterator iter;
bool seen_caller = false;
int cursor = 0;
int frames_seen = 0;
while (!iter.done() && frames_seen < limit) {
StackFrame* raw_frame = iter.frame();
if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) {
frames_seen++;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
Object* recv = frame->receiver();
Object* fun = frame->function();
Address pc = frame->pc();
Address start = frame->code()->address();
Smi* offset = Smi::FromInt(pc - start);
FixedArray* elements = result->elements();
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
elements->set(cursor++, fun);
elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
} else {
HandleScope scope;
SetElement(result, cursor++, Handle<Object>(recv));
SetElement(result, cursor++, Handle<Object>(fun));
SetElement(result, cursor++, Handle<Smi>(offset));
}
}
iter.Advance();
}
result->set_length(Smi::FromInt(cursor), SKIP_WRITE_BARRIER);
return *result;
}
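The collected trace is thus a flat array of repeating (receiver, function, code offset) triples, and its length is set to three times the number of frames kept. A hypothetical standalone consumer of that layout, remodeled here as structs for clarity:

#include <cstdio>

// Hypothetical frame record: one (receiver, function, code offset) triple.
struct RawFrame {
  const void* receiver;
  const void* function;
  int code_offset;  // a Smi in the real array: pc minus code start
};

void PrintRawTrace(const RawFrame* frames, int frame_count) {
  for (int i = 0; i < frame_count; i++) {
    // The offset maps back to a source position the same way
    // Runtime_FunctionGetPositionForOffset does it:
    //   code->SourcePosition(code->address() + code_offset)
    printf("frame %d: pc offset %d\n", i, frames[i].code_offset);
  }
}

int main() {
  RawFrame frames[] = { { NULL, NULL, 42 }, { NULL, NULL, 7 } };
  PrintRawTrace(frames, 2);
  return 0;
}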
static Object* Runtime_Abort(Arguments args) {
ASSERT(args.length() == 2);
OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +

10
deps/v8/src/runtime.h

@ -169,18 +169,12 @@ namespace internal {
F(FunctionGetSourceCode, 1) \
F(FunctionGetScript, 1) \
F(FunctionGetScriptSourcePosition, 1) \
F(FunctionGetPositionForOffset, 2) \
F(FunctionIsAPIFunction, 1) \
F(GetScript, 1) \
F(CollectStackTrace, 2) \
\
F(ClassOf, 1) \
F(HasDateClass, 1) \
F(HasStringClass, 1) \
F(HasArrayClass, 1) \
F(HasFunctionClass, 1) \
F(HasNumberClass, 1) \
F(HasBooleanClass, 1) \
F(HasArgumentsClass, 1) \
F(HasRegExpClass, 1) \
F(SetCode, 2) \
\
F(CreateApiFunction, 1) \

27
deps/v8/src/runtime.js

@ -161,14 +161,31 @@ function ADD(x) {
// Left operand (this) is already a string.
function STRING_ADD_LEFT(y) {
if (!IS_STRING(y)) y = %ToString(%ToPrimitive(y, NO_HINT));
if (!IS_STRING(y)) {
if (IS_STRING_WRAPPER(y)) {
y = %_ValueOf(y);
} else {
y = IS_NUMBER(y)
? %NumberToString(y)
: %ToString(%ToPrimitive(y, NO_HINT));
}
}
return %StringAdd(this, y);
}
// Right operand (y) is already a string.
function STRING_ADD_RIGHT(y) {
var x = IS_STRING(this) ? this : %ToString(%ToPrimitive(this, NO_HINT));
var x = this;
if (!IS_STRING(x)) {
if (IS_STRING_WRAPPER(x)) {
x = %_ValueOf(x);
} else {
x = IS_NUMBER(x)
? %NumberToString(x)
: %ToString(%ToPrimitive(x, NO_HINT));
}
}
return %StringAdd(x, y);
}
@ -394,7 +411,7 @@ function APPLY_PREPARE(args) {
// First check whether length is a positive Smi and args is an
// array. This is the fast case. If this fails, we do the slow case
// that takes care of more eventualities.
if (%_IsArray(args)) {
if (IS_ARRAY(args)) {
length = args.length;
if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {
return length;
@ -415,9 +432,7 @@ function APPLY_PREPARE(args) {
}
// Make sure the arguments list has the right type.
if (args != null &&
!%HasArrayClass(args) &&
!%HasArgumentsClass(args)) {
if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
throw %MakeTypeError('apply_wrong_args', []);
}

107
deps/v8/src/scopes.cc

@ -71,28 +71,28 @@ static bool Match(void* key1, void* key2) {
// Dummy constructor
LocalsMap::LocalsMap(bool gotta_love_static_overloading) : HashMap() {}
VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
LocalsMap::LocalsMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
LocalsMap::~LocalsMap() {}
VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
VariableMap::~VariableMap() {}
Variable* LocalsMap::Declare(Scope* scope,
Variable* VariableMap::Declare(Scope* scope,
Handle<String> name,
Variable::Mode mode,
bool is_valid_LHS,
bool is_valid_lhs,
Variable::Kind kind) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location());
p->value = new Variable(scope, name, mode, is_valid_LHS, kind);
p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
}
return reinterpret_cast<Variable*>(p->value);
}
Variable* LocalsMap::Lookup(Handle<String> name) {
Variable* VariableMap::Lookup(Handle<String> name) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
if (p != NULL) {
ASSERT(*reinterpret_cast<String**>(p->key) == *name);
@ -110,7 +110,7 @@ Variable* LocalsMap::Lookup(Handle<String> name) {
// Dummy constructor
Scope::Scope()
: inner_scopes_(0),
locals_(false),
variables_(false),
temps_(0),
params_(0),
dynamics_(NULL),
@ -168,27 +168,26 @@ void Scope::Initialize(bool inside_with) {
// instead load them directly from the stack. Currently, the only
// such parameter is 'this' which is passed on the stack when
// invoking scripts
{ Variable* var =
locals_.Declare(this, Factory::this_symbol(), Variable::VAR,
Variable* var =
variables_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
receiver_->BindTo(var);
}
if (is_function_scope()) {
// Declare 'arguments' variable which exists in all functions.
// Note that it may never be accessed, in which case it won't
// be allocated during variable allocation.
locals_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
variables_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
true, Variable::ARGUMENTS);
}
}
Variable* Scope::LookupLocal(Handle<String> name) {
return locals_.Lookup(name);
Variable* Scope::LocalLookup(Handle<String> name) {
return variables_.Lookup(name);
}
@ -196,7 +195,7 @@ Variable* Scope::Lookup(Handle<String> name) {
for (Scope* scope = this;
scope != NULL;
scope = scope->outer_scope()) {
Variable* var = scope->LookupLocal(name);
Variable* var = scope->LocalLookup(name);
if (var != NULL) return var;
}
return NULL;
@ -210,18 +209,25 @@ Variable* Scope::DeclareFunctionVar(Handle<String> name) {
}
Variable* Scope::Declare(Handle<String> name, Variable::Mode mode) {
Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
// DYNAMIC variables are introduced during variable allocation,
// INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary().
ASSERT(mode == Variable::VAR || mode == Variable::CONST);
return locals_.Declare(this, name, mode, true, Variable::NORMAL);
return variables_.Declare(this, name, mode, true, Variable::NORMAL);
}
Variable* Scope::DeclareGlobal(Handle<String> name) {
ASSERT(is_global_scope());
return variables_.Declare(this, name, Variable::DYNAMIC, true,
Variable::NORMAL);
}
void Scope::AddParameter(Variable* var) {
ASSERT(is_function_scope());
ASSERT(LookupLocal(var->name()) == var);
ASSERT(LocalLookup(var->name()) == var);
params_.Add(var);
}
@ -291,7 +297,9 @@ void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
locals->Add(var);
}
}
for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) {
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var->var_uses()->is_used()) {
locals->Add(var);
@ -410,8 +418,8 @@ static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) {
}
static void PrintMap(PrettyPrinter* printer, int indent, LocalsMap* map) {
for (LocalsMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
static void PrintMap(PrettyPrinter* printer, int indent, VariableMap* map) {
for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
PrintVar(printer, indent, var);
}
@ -478,7 +486,7 @@ void Scope::Print(int n) {
}
Indent(n1, "// local vars\n");
PrintMap(&printer, n1, &locals_);
PrintMap(&printer, n1, &variables_);
Indent(n1, "// dynamic vars\n");
if (dynamics_ != NULL) {
@ -502,7 +510,7 @@ void Scope::Print(int n) {
Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
LocalsMap* map = dynamics_->GetMap(mode);
VariableMap* map = dynamics_->GetMap(mode);
Variable* var = map->Lookup(name);
if (var == NULL) {
// Declare a new non-local.
@ -530,7 +538,7 @@ Variable* Scope::LookupRecursive(Handle<String> name,
bool guess = scope_calls_eval_;
// Try to find the variable in this scope.
Variable* var = LookupLocal(name);
Variable* var = LocalLookup(name);
if (var != NULL) {
// We found a variable. If this is not an inner lookup, we are done.
@ -621,8 +629,7 @@ void Scope::ResolveVariable(Scope* global_scope,
scope_calls_eval_ || outer_scope_calls_eval_)) {
// We must have a global variable.
ASSERT(global_scope != NULL);
var = new Variable(global_scope, proxy->name(),
Variable::DYNAMIC, true, Variable::NORMAL);
var = global_scope->DeclareGlobal(proxy->name());
} else if (scope_inside_with_) {
// If we are inside a with statement we give up and look up
@ -706,26 +713,26 @@ bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
bool Scope::MustAllocate(Variable* var) {
// Give var a read/write use if there is a chance it might be
// accessed via an eval() call, or if it is a global variable.
// This is only possible if the variable has a visible name.
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope_ ||
scope_calls_eval_ || inner_scope_calls_eval_ ||
scope_contains_with_ || var->is_global())) {
scope_contains_with_)) {
var->var_uses()->RecordAccess(1);
}
return var->var_uses()->is_used();
// Global variables do not need to be allocated.
return !var->is_global() && var->var_uses()->is_used();
}
bool Scope::MustAllocateInContext(Variable* var) {
// If var is accessed from an inner scope, or if there is a
// possibility that it might be accessed from the current or
// an inner scope (through an eval() call), it must be allocated
// in the context.
// Exceptions: Global variables and temporary variables must
// never be allocated in the (FixedArray part of the) context.
// possibility that it might be accessed from the current or an inner
// scope (through an eval() call), it must be allocated in the
// context. Exception: temporary variables are not allocated in the
// context.
return
var->mode() != Variable::TEMPORARY &&
(var->is_accessed_from_inner_scope_ ||
@ -755,7 +762,7 @@ void Scope::AllocateHeapSlot(Variable* var) {
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
Variable* arguments = LookupLocal(Factory::arguments_symbol());
Variable* arguments = LocalLookup(Factory::arguments_symbol());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
if (MustAllocate(arguments) && !HasArgumentsParameter()) {
// 'arguments' is used. Unless there is also a parameter called
@ -865,7 +872,7 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->rewrite_ == NULL ||
(!var->IsVariable(Factory::result_symbol())) ||
(var->slot() == NULL || var->slot()->type() != Slot::LOCAL));
if (MustAllocate(var) && var->rewrite_ == NULL) {
if (var->rewrite_ == NULL && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
AllocateHeapSlot(var);
} else {
@ -876,27 +883,21 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
void Scope::AllocateNonParameterLocals() {
// Each variable occurs exactly once in the locals_ list; all
// variables that have no rewrite yet are non-parameter locals.
// Sort them according to use such that the locals with more uses
// get allocated first.
if (FLAG_usage_computation) {
// This is currently not implemented.
}
// All variables that have no rewrite yet are non-parameter locals.
for (int i = 0; i < temps_.length(); i++) {
AllocateNonParameterLocal(temps_[i]);
}
for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) {
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
AllocateNonParameterLocal(var);
}
// Note: For now, function_ must be allocated at the very end. If
// it gets allocated in the context, it must be the last slot in the
// context, because of the current ScopeInfo implementation (see
// For now, function_ must be allocated at the very end. If it gets
// allocated in the context, it must be the last slot in the context,
// because of the current ScopeInfo implementation (see
// ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
if (function_ != NULL) {
AllocateNonParameterLocal(function_);

58
deps/v8/src/scopes.h

@ -35,19 +35,22 @@ namespace v8 {
namespace internal {
// A hash map to support fast local variable declaration and lookup.
class LocalsMap: public HashMap {
// A hash map to support fast variable declaration and lookup.
class VariableMap: public HashMap {
public:
LocalsMap();
VariableMap();
// Dummy constructor. This constructor doesn't set up the map
// properly so don't use it unless you have a good reason.
explicit LocalsMap(bool gotta_love_static_overloading);
explicit VariableMap(bool gotta_love_static_overloading);
virtual ~LocalsMap();
virtual ~VariableMap();
Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode,
bool is_valid_LHS, Variable::Kind kind);
Variable* Declare(Scope* scope,
Handle<String> name,
Variable::Mode mode,
bool is_valid_lhs,
Variable::Kind kind);
Variable* Lookup(Handle<String> name);
};
@ -59,14 +62,14 @@ class LocalsMap: public HashMap {
// and setup time for scopes that don't need them.
class DynamicScopePart : public ZoneObject {
public:
LocalsMap* GetMap(Variable::Mode mode) {
VariableMap* GetMap(Variable::Mode mode) {
int index = mode - Variable::DYNAMIC;
ASSERT(index >= 0 && index < 3);
return &maps_[index];
}
private:
LocalsMap maps_[3];
VariableMap maps_[3];
};
@ -105,7 +108,7 @@ class Scope: public ZoneObject {
// Declarations
// Lookup a variable in this scope. Returns the variable or NULL if not found.
virtual Variable* LookupLocal(Handle<String> name);
virtual Variable* LocalLookup(Handle<String> name);
// Lookup a variable in this scope or outer scopes.
// Returns the variable or NULL if not found.
@ -116,9 +119,15 @@ class Scope: public ZoneObject {
// outer scope. Only possible for function scopes; at most one variable.
Variable* DeclareFunctionVar(Handle<String> name);
// Declare a variable in this scope. If the variable has been
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
virtual Variable* Declare(Handle<String> name, Variable::Mode mode);
virtual Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
// Declare an implicit global variable in this scope which must be a
// global scope. The variable was introduced (possibly from an inner
// scope) by a reference to an unresolved variable with no intervening
// with statements or eval calls.
Variable* DeclareGlobal(Handle<String> name);
// Add a parameter to the parameter list. The parameter must have been
// declared via Declare. The same parameter may occur more than once in
@ -288,25 +297,28 @@ class Scope: public ZoneObject {
Handle<String> scope_name_;
// The variables declared in this scope:
// all user-declared variables (incl. parameters)
LocalsMap locals_;
// compiler-allocated (user-invisible) temporaries
//
// All user-declared variables (incl. parameters). For global scopes
// variables may be implicitly 'declared' by being used (possibly in
// an inner scope) with no intervening with statements or eval calls.
VariableMap variables_;
// Compiler-allocated (user-invisible) temporaries.
ZoneList<Variable*> temps_;
// parameter list in source order
// Parameter list in source order.
ZoneList<Variable*> params_;
// variables that must be looked up dynamically
// Variables that must be looked up dynamically.
DynamicScopePart* dynamics_;
// unresolved variables referred to from this scope
// Unresolved variables referred to from this scope.
ZoneList<VariableProxy*> unresolved_;
// declarations
// Declarations.
ZoneList<Declaration*> decls_;
// convenience variable
// Convenience variable.
VariableProxy* receiver_;
// function variable, if any; function scopes only
// Function variable, if any; function scopes only.
Variable* function_;
// convenience variable; function scopes only
// Convenience variable; function scopes only.
VariableProxy* arguments_;
// convenience variable; function scopes only
// Convenience variable; function scopes only.
VariableProxy* arguments_shadow_;
// Illegal redeclaration.

92
deps/v8/src/serialize.cc

@ -42,47 +42,44 @@
namespace v8 {
namespace internal {
// Encoding: a RelativeAddress must be able to fit in a pointer:
// it is encoded as an Address with (from MS to LS bits):
// 27 bits identifying a word in the space, in one of three formats:
// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
// 32-bit encoding: a RelativeAddress must be able to fit in a
// pointer: it is encoded as an Address with (from LS to MS bits):
// - 2 bits identifying this as a HeapObject.
// - 4 bits to encode the AllocationSpace (including special values for
// code and fixed arrays in LO space)
// - 27 bits identifying a word in the space, in one of three formats:
// - paged spaces: 16 bits of page number, 11 bits of word offset in page
// - NEW space: 27 bits of word offset
// - LO space: 27 bits of page number
// 3 bits to encode the AllocationSpace (special values for code in LO space)
// 2 bits identifying this as a HeapObject
const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = kSpaceTagSize;
const int kSpaceMask = kSpaceTagMask;
// These values are used instead of space numbers when serializing/
// deserializing. They indicate an object that is in large object space, but
// should be treated specially.
// Make the pages executable on platforms that support it:
const int kLOSpaceExecutable = LAST_SPACE + 1;
// Reserve space for write barrier bits (for objects that can contain
// references to new space):
const int kLOSpacePointer = LAST_SPACE + 2;
const int kSpaceBits = 4;
const int kSpaceMask = (1 << kSpaceBits) - 1;
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageShift = kOffsetShift + kOffsetBits;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageMask = (1 << kPageBits) - 1;
const int kPageAndOffsetShift = kOffsetShift;
const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
// These values are special allocation space tags used for
// serialization.
// Mark the pages executable on platforms that support it.
const int kLargeCode = LAST_SPACE + 1;
// Allocate extra remembered-set bits.
const int kLargeFixedArray = LAST_SPACE + 2;
static inline AllocationSpace GetSpace(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
if (space_number == kLOSpaceExecutable) space_number = LO_SPACE;
else if (space_number == kLOSpacePointer) space_number = LO_SPACE;
if (space_number > LAST_SPACE) space_number = LO_SPACE;
return static_cast<AllocationSpace>(space_number);
}
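Encoding and decoding under the new 32-bit scheme is plain shift-and-mask arithmetic. A standalone sketch using the field widths from the comment above (2 tag bits, 4 space bits, 11 offset bits, the rest for the page index); the constant values are restated here as assumptions rather than taken from globals.h:

#include <cstdint>
#include <cstdio>

const int kTagBits     = 2;   // assumed kHeapObjectTagSize
const int kTag         = 1;   // assumed kHeapObjectTag
const int kSpaceShift  = kTagBits;
const int kSpaceBits   = 4;
const int kSpaceMask   = (1 << kSpaceBits) - 1;
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits  = 11;
const int kPageShift   = kOffsetShift + kOffsetBits;

// Encode a paged-space location: tag | space | word offset | page index.
uint32_t EncodePaged(int space, int page, int word_offset) {
  return kTag | (space << kSpaceShift) |
         (word_offset << kOffsetShift) |
         (static_cast<uint32_t>(page) << kPageShift);
}

// Decode the space number back out, cf. GetSpace above.
int DecodeSpace(uint32_t encoded) {
  return (encoded >> kSpaceShift) & kSpaceMask;
}

int main() {
  printf("space=%d\n", DecodeSpace(EncodePaged(3, 17, 42)));  // space=3
  return 0;
}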
@ -91,7 +88,7 @@ static inline bool IsLargeExecutableObject(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
const int space_number =
(static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
return (space_number == kLOSpaceExecutable);
return (space_number == kLargeCode);
}
@ -99,7 +96,7 @@ static inline bool IsLargeFixedArray(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
const int space_number =
(static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
return (space_number == kLOSpacePointer);
return (space_number == kLargeFixedArray);
}
@ -147,6 +144,9 @@ class RelativeAddress {
int page_index,
int page_offset)
: space_(space), page_index_(page_index), page_offset_(page_offset) {
// Assert that the space encoding (plus the two pseudo-spaces for
// special large objects) fits in the available bits.
ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
ASSERT(space <= LAST_SPACE && space >= 0);
}
@ -154,8 +154,7 @@ class RelativeAddress {
Address Encode() const;
AllocationSpace space() const {
if (space_ == kLOSpaceExecutable) return LO_SPACE;
if (space_ == kLOSpacePointer) return LO_SPACE;
if (space_ > LAST_SPACE) return LO_SPACE;
return static_cast<AllocationSpace>(space_);
}
int page_index() const { return page_index_; }
@ -165,7 +164,8 @@ class RelativeAddress {
return space_ == CODE_SPACE ||
space_ == OLD_POINTER_SPACE ||
space_ == OLD_DATA_SPACE ||
space_ == MAP_SPACE;
space_ == MAP_SPACE ||
space_ == CELL_SPACE;
}
void next_address(int offset) { page_offset_ += offset; }
@ -180,11 +180,11 @@ class RelativeAddress {
void set_to_large_code_object() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpaceExecutable;
space_ = kLargeCode;
}
void set_to_large_fixed_array() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpacePointer;
space_ = kLargeFixedArray;
}
@ -201,6 +201,7 @@ Address RelativeAddress::Encode() const {
int result = 0;
switch (space_) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
@ -216,8 +217,8 @@ Address RelativeAddress::Encode() const {
result = word_offset << kPageAndOffsetShift;
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
case kLargeCode:
case kLargeFixedArray:
ASSERT_EQ(0, page_offset_);
ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
result = page_index_ << kPageAndOffsetShift;
@ -235,6 +236,7 @@ void RelativeAddress::Verify() {
ASSERT(page_offset_ >= 0 && page_index_ >= 0);
switch (space_) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
@ -245,8 +247,8 @@ void RelativeAddress::Verify() {
ASSERT(page_index_ == 0);
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
case kLargeCode:
case kLargeFixedArray:
ASSERT(page_offset_ == 0);
break;
}
@ -291,6 +293,7 @@ class SimulatedHeapSpace {
void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
@ -307,12 +310,15 @@ void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE: {
PagedSpace* ps;
if (space == MAP_SPACE) {
ps = Heap::map_space();
} else if (space == CELL_SPACE) {
ps = Heap::cell_space();
} else if (space == OLD_POINTER_SPACE) {
ps = Heap::old_pointer_space();
} else if (space == OLD_DATA_SPACE) {
@ -699,6 +705,7 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
10,
"Debug::step_in_fp_addr()");
#endif
Add(ExternalReference::double_fp_operation(Token::ADD).address(),
UNCLASSIFIED,
11,
@ -711,7 +718,18 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
13,
"mul_two_doubles");
#endif
Add(ExternalReference::double_fp_operation(Token::DIV).address(),
UNCLASSIFIED,
14,
"div_two_doubles");
Add(ExternalReference::double_fp_operation(Token::MOD).address(),
UNCLASSIFIED,
15,
"mod_two_doubles");
Add(ExternalReference::compare_doubles().address(),
UNCLASSIFIED,
16,
"compare_doubles");
}
@ -1109,6 +1127,8 @@ void Serializer::PutHeader() {
writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::map_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::cell_space()->Size());
writer_->PutC(']');
// Write global handles.
writer_->PutC('G');
@ -1291,6 +1311,7 @@ static const int kInitArraySize = 32;
Deserializer::Deserializer(const byte* str, int len)
: reader_(str, len),
map_pages_(kInitArraySize),
cell_pages_(kInitArraySize),
old_pointer_pages_(kInitArraySize),
old_data_pages_(kInitArraySize),
code_pages_(kInitArraySize),
@ -1463,6 +1484,8 @@ void Deserializer::GetHeader() {
InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
reader_.ExpectC(']');
// Create placeholders for global handles, later to be filled during
// IterateRoots.
@ -1595,6 +1618,9 @@ Object* Deserializer::Resolve(Address encoded) {
case MAP_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::map_space(), &map_pages_);
case CELL_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::cell_space(), &cell_pages_);
case OLD_POINTER_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::old_pointer_space(), &old_pointer_pages_);

3
deps/v8/src/serialize.h

@ -321,9 +321,10 @@ class Deserializer: public ObjectVisitor {
// Resolve caches the following:
List<Page*> map_pages_; // All pages in the map space.
List<Page*> cell_pages_; // All pages in the cell space.
List<Page*> old_pointer_pages_; // All pages in the old pointer space.
List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_;
List<Page*> code_pages_; // All pages in the code space.
List<Object*> large_objects_; // All known large objects.
// A list of global handles at deserialization time.
List<Object**> global_handles_;

23
deps/v8/src/spaces-inl.h

@ -93,17 +93,21 @@ Address Page::AllocationTop() {
void Page::ClearRSet() {
#ifndef V8_HOST_ARCH_64_BIT
// This method can be called in all rset states.
memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
#endif
}
// Give an address a (32-bits):
// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The rset address is computed as:
// The address of the rset word containing the bit for this word is computed as:
// page_address + words * 4
// For a 64-bit address, if it is:
// | page address | quadwords(5) | bit offset(5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
// page_address + quadwords * 4 + kRSetOffset.
// The rset is always accessed as 32-bit words, with bit offsets taken
// within a 32-bit word, even on the X64 architecture.
Address Page::ComputeRSetBitPosition(Address address, int offset,
uint32_t* bitmask) {
@ -115,7 +119,7 @@ Address Page::ComputeRSetBitPosition(Address address, int offset,
*bitmask = 1 << (bit_offset % kBitsPerInt);
Address rset_address =
page->address() + (bit_offset / kBitsPerInt) * kIntSize;
page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
// The remembered set address is either in the normal remembered set range
// of a page or else we have a large object page.
ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
@ -131,8 +135,10 @@ Address Page::ComputeRSetBitPosition(Address address, int offset,
// of the object:
// (rset_address - page->ObjectAreaStart()).
// Ie, we can just add the object size.
// In the X64 architecture, the remembered set ends before the object start,
// so we need to add an additional offset, from rset end to object start.
ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
rset_address +=
rset_address += kObjectStartOffset - kRSetEndOffset +
FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+ Array::kLengthOffset));
}
@ -160,14 +166,9 @@ void Page::UnsetRSet(Address address, int offset) {
bool Page::IsRSetSet(Address address, int offset) {
#ifdef V8_HOST_ARCH_64_BIT
// TODO(X64): Reenable when RSet works.
return true;
#else // V8_HOST_ARCH_64_BIT
uint32_t bitmask = 0;
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
return (Memory::uint32_at(rset_address) & bitmask) != 0;
#endif // V8_HOST_ARCH_64_BIT
}
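Concretely, on a 32-bit target each 4-byte word of a page owns one remembered-set bit, and the bits are grouped into 32-bit rset words starting kRSetOffset bytes into the page (zero on 32-bit hosts, four pointers' worth on x64). A standalone sketch of the mapping described in the comment above, with the rset offset passed in as a parameter:

#include <cstdint>

// Map a pointer slot's byte offset within its page to the address of the
// 32-bit rset word holding its bit, plus the bitmask for that bit.
void RSetBitPosition(uintptr_t page_address, int page_offset, int rset_offset,
                     uintptr_t* rset_word_address, uint32_t* bitmask) {
  int bit_offset = page_offset >> 2;             // one bit per 4-byte word
  *bitmask = 1u << (bit_offset % 32);            // bit within one rset word
  *rset_word_address =
      page_address + rset_offset + (bit_offset / 32) * 4;
}

int main() {
  uintptr_t word;
  uint32_t mask;
  RSetBitPosition(0x10000, 1024, 0, &word, &mask);
  // Byte offset 1024 is word 256 of the page: rset word 8, bit 0.
  return (mask == 1u && word == 0x10000 + 32) ? 0 : 1;
}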

354
deps/v8/src/spaces.cc

@ -786,6 +786,77 @@ void PagedSpace::Print() { }
#endif
#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void PagedSpace::Verify(ObjectVisitor* visitor) {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
Page* current_page = first_page_;
while (current_page->is_valid()) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
// Unless this is the last page in the space containing allocated
// objects, the allocation top should be at a constant offset from the
// object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
}
// It should be packed with objects from the bottom to the top.
Address current = current_page->ObjectAreaStart();
while (current < top) {
HeapObject* object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
ASSERT(Heap::map_space()->Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
// The object itself should look OK.
object->Verify();
// All the interior pointers should be contained in the heap and
// have their remembered set bits set if required as determined
// by the visitor.
int size = object->Size();
if (object->IsCode()) {
Code::cast(object)->ConvertICTargetsFromAddressToObject();
object->IterateBody(map->instance_type(), size, visitor);
Code::cast(object)->ConvertICTargetsFromObjectToAddress();
} else {
object->IterateBody(map->instance_type(), size, visitor);
}
current += size;
}
// The allocation pointer should not be in the middle of an object.
ASSERT(current == top);
}
current_page = current_page->next_page();
}
}
#endif
// -----------------------------------------------------------------------------
// NewSpace implementation
@ -1141,7 +1212,7 @@ static void ReportHistogram(bool print_spill) {
// Summarize string types.
int string_number = 0;
int string_bytes = 0;
#define INCREMENT(type, size, name) \
#define INCREMENT(type, size, name, camel_name) \
string_number += heap_histograms[type].number(); \
string_bytes += heap_histograms[type].bytes();
STRING_TYPE_LIST(INCREMENT)
@ -1185,7 +1256,7 @@ static void DoReportStatistics(HistogramInfo* info, const char* description) {
// Lump all the string types together.
int string_number = 0;
int string_bytes = 0;
#define INCREMENT(type, size, name) \
#define INCREMENT(type, size, name, camel_name) \
string_number += info[type].number(); \
string_bytes += info[type].bytes();
STRING_TYPE_LIST(INCREMENT)
@ -1265,13 +1336,13 @@ void FreeListNode::set_size(int size_in_bytes) {
// If the block is too small (eg, one or two words) to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > Array::kHeaderSize) {
set_map(Heap::byte_array_map());
if (size_in_bytes > ByteArray::kAlignedSize) {
set_map(Heap::raw_unchecked_byte_array_map());
ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
set_map(Heap::one_word_filler_map());
set_map(Heap::raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
set_map(Heap::two_word_filler_map());
set_map(Heap::raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
@ -1280,16 +1351,26 @@ void FreeListNode::set_size(int size_in_bytes) {
Address FreeListNode::next() {
ASSERT(map() == Heap::byte_array_map());
ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
map() == Heap::raw_unchecked_two_pointer_filler_map());
if (map() == Heap::raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
} else {
return Memory::Address_at(address() + kPointerSize);
}
}
void FreeListNode::set_next(Address next) {
ASSERT(map() == Heap::byte_array_map());
ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
map() == Heap::raw_unchecked_two_pointer_filler_map());
if (map() == Heap::raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
} else {
Memory::Address_at(address() + kPointerSize) = next;
}
}
@ -1445,42 +1526,42 @@ bool OldSpaceFreeList::Contains(FreeListNode* node) {
#endif
MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
owner_ = owner;
FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
: owner_(owner), object_size_(object_size) {
Reset();
}
void MapSpaceFreeList::Reset() {
void FixedSizeFreeList::Reset() {
available_ = 0;
head_ = NULL;
}
void MapSpaceFreeList::Free(Address start) {
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
for (int i = 0; i < Map::kSize; i += kPointerSize) {
for (int i = 0; i < object_size_; i += kPointerSize) {
Memory::Address_at(start + i) = kZapValue;
}
#endif
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(Map::kSize);
node->set_size(object_size_);
node->set_next(head_);
head_ = node->address();
available_ += Map::kSize;
available_ += object_size_;
}
Object* MapSpaceFreeList::Allocate() {
Object* FixedSizeFreeList::Allocate() {
if (head_ == NULL) {
return Failure::RetryAfterGC(Map::kSize, owner_);
return Failure::RetryAfterGC(object_size_, owner_);
}
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
head_ = node->next();
available_ -= Map::kSize;
available_ -= object_size_;
return node;
}
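FixedSizeFreeList is an intrusive LIFO free list: the next pointer is threaded through the freed blocks themselves (disguised as byte arrays or fillers so the heap can still be walked), so Free and Allocate are both O(1) with no side storage. A minimal standalone sketch of the same shape, generic C++ rather than V8 code:

#include <cstddef>

// Minimal intrusive fixed-size free list: freed blocks double as list nodes.
class TinyFixedFreeList {
 public:
  explicit TinyFixedFreeList(size_t object_size)
      : head_(NULL), object_size_(object_size), available_(0) {}

  // Push a freed block; its first word becomes the 'next' pointer.
  void Free(void* start) {
    Node* node = static_cast<Node*>(start);
    node->next = head_;
    head_ = node;
    available_ += object_size_;
  }

  // Pop a block, or return NULL if the list is empty (the real code
  // returns a RetryAfterGC failure instead).
  void* Allocate() {
    if (head_ == NULL) return NULL;
    Node* node = head_;
    head_ = node->next;
    available_ -= object_size_;
    return node;
  }

  size_t available() const { return available_; }

 private:
  struct Node { Node* next; };
  Node* head_;
  size_t object_size_;
  size_t available_;
};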
@ -1494,7 +1575,6 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) {
// the space is considered 'available' and we will rediscover live data
// and waste during the collection.
MCResetRelocationInfo();
mc_end_of_relocation_ = bottom();
ASSERT(Available() == Capacity());
} else {
// During a non-compacting collection, everything below the linear
@ -1510,24 +1590,6 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) {
}
void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
ASSERT(Contains(address));
Address current_top = mc_end_of_relocation_;
Page* current_page = Page::FromAllocationTop(current_top);
// No more objects relocated to this page? Move to the next.
ASSERT(current_top <= current_page->mc_relocation_top);
if (current_top == current_page->mc_relocation_top) {
// The space should already be properly expanded.
Page* next_page = current_page->next_page();
CHECK(next_page->is_valid());
mc_end_of_relocation_ = next_page->ObjectAreaStart();
}
ASSERT(mc_end_of_relocation_ == address);
mc_end_of_relocation_ += size_in_bytes;
}
void OldSpace::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
@ -1624,76 +1686,6 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void OldSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
Page* current_page = first_page_;
while (current_page->is_valid()) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
// Unless this is the last page in the space containing allocated
// objects, the allocation top should be at the object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == current_page->ObjectAreaEnd());
}
// It should be packed with objects from the bottom to the top.
Address current = current_page->ObjectAreaStart();
while (current < top) {
HeapObject* object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
ASSERT(Heap::map_space()->Contains(map));
// The object should not be a map.
ASSERT(!object->IsMap());
// The object itself should look OK.
object->Verify();
// All the interior pointers should be contained in the heap and have
// their remembered set bits set if they point to new space. Code
// objects do not have remembered set bits that we care about.
VerifyPointersAndRSetVisitor rset_visitor;
VerifyPointersVisitor no_rset_visitor;
int size = object->Size();
if (object->IsCode()) {
Code::cast(object)->ConvertICTargetsFromAddressToObject();
object->IterateBody(map->instance_type(), size, &no_rset_visitor);
Code::cast(object)->ConvertICTargetsFromObjectToAddress();
} else {
object->IterateBody(map->instance_type(), size, &rset_visitor);
}
current += size;
}
// The allocation pointer should not be in the middle of an object.
ASSERT(current == top);
}
current_page = current_page->next_page();
}
}
struct CommentStatistic {
const char* comment;
int size;
@ -1856,7 +1848,7 @@ void OldSpace::ReportStatistics() {
int bitpos = intoff*kBitsPerByte + bitoff;
Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
Object** obj = reinterpret_cast<Object**>(slot);
if (*obj == Heap::fixed_array_map()) {
if (*obj == Heap::raw_unchecked_fixed_array_map()) {
rset_marked_arrays++;
FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
@ -1987,25 +1979,13 @@ void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif
// -----------------------------------------------------------------------------
// MapSpace implementation
// FixedSpace implementation
void MapSpace::PrepareForMarkCompact(bool will_compact) {
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
if (will_compact) {
// Reset relocation info.
MCResetRelocationInfo();
// Initialize map index entry.
int page_count = 0;
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
ASSERT_MAP_PAGE_INDEX(page_count);
Page* p = it.next();
ASSERT(p->mc_page_index == page_count);
page_addresses_[page_count++] = p->address();
}
// During a compacting collection, everything in the space is considered
// 'available' (set by the call to MCResetRelocationInfo) and we will
// rediscover live and wasted bytes during the collection.
@ -2023,7 +2003,7 @@ void MapSpace::PrepareForMarkCompact(bool will_compact) {
}
void MapSpace::MCCommitRelocationInfo() {
void FixedSpace::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
@ -2053,7 +2033,8 @@ void MapSpace::MCCommitRelocationInfo() {
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
// Linear allocation in this space has failed. If there is another page
// in the space, move to that page and allocate there. This allocation
// should succeed.
@ -2062,10 +2043,10 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// There is no next page in this space. Try free list allocation. The
// map space free list implicitly assumes that all free blocks are map
// sized.
if (size_in_bytes == Map::kSize) {
// There is no next page in this space. Try free list allocation.
// The fixed space free list implicitly assumes that all free blocks
// are of the fixed size.
if (size_in_bytes == object_size_in_bytes_) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
@ -2094,81 +2075,19 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
accounting_stats_.WasteBytes(kPageExtra);
ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void MapSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
Page* top_page = Page::FromAllocationTop(allocation_info_.top);
ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
// Loop over all the pages.
bool above_allocation_top = false;
Page* current_page = first_page_;
while (current_page->is_valid()) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
// Unless this is the last page in the space containing allocated
// objects, the allocation top should be at a constant offset from the
// object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
}
// It should be packed with objects from the bottom to the top.
Address current = current_page->ObjectAreaStart();
while (current < top) {
HeapObject* object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
// be in map space.
Map* map = object->map();
ASSERT(map->IsMap());
ASSERT(Heap::map_space()->Contains(map));
// The object should be a map or a byte array.
ASSERT(object->IsMap() || object->IsByteArray());
// The object itself should look OK.
object->Verify();
// All the interior pointers should be contained in the heap and
// have their remembered set bits set if they point to new space.
VerifyPointersAndRSetVisitor visitor;
int size = object->Size();
object->IterateBody(map->instance_type(), size, &visitor);
current += size;
}
// The allocation pointer should not be in the middle of an object.
ASSERT(current == top);
}
current_page = current_page->next_page();
}
}
void MapSpace::ReportStatistics() {
void FixedSpace::ReportStatistics() {
int pct = Available() * 100 / Capacity();
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
@ -2215,7 +2134,50 @@ void MapSpace::ReportStatistics() {
}
void MapSpace::PrintRSet() { DoPrintRSet("map"); }
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif
// -----------------------------------------------------------------------------
// MapSpace implementation
void MapSpace::PrepareForMarkCompact(bool will_compact) {
// Call prepare of the super class.
FixedSpace::PrepareForMarkCompact(will_compact);
if (will_compact) {
// Initialize map index entry.
int page_count = 0;
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
ASSERT_MAP_PAGE_INDEX(page_count);
Page* p = it.next();
ASSERT(p->mc_page_index == page_count);
page_addresses_[page_count++] = p->address();
}
}
}
#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
ASSERT(object->IsMap() || object->IsByteArray());
}
#endif
// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
ASSERT(object->IsJSGlobalPropertyCell() ||
object->map() == Heap::two_pointer_filler_map());
}
#endif

157
deps/v8/src/spaces.h

@ -93,13 +93,14 @@ class AllocationInfo;
// bytes are used as remembered set, and the rest of the page is the object
// area.
//
// Pointers are aligned to the pointer size (4 bytes), only 1 bit is needed
// Pointers are aligned to the pointer size (4), only 1 bit is needed
// for a pointer in the remembered set. Given an address, its remembered set
// bit position (offset from the start of the page) is calculated by dividing
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
// TODO(X64): This description only represents the 32-bit layout.
// On the 64-bit platform, we add an offset to the start of the remembered set.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
@ -217,15 +218,25 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
// The offset of the remembered set in a page, in addition to the empty words
// formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
#else
static const int kRSetOffset = 0;
#endif
// The end offset of the remembered set in a page
// (heaps are aligned to pointer size).
static const int kRSetEndOffset = kPageSize / kBitsPerPointer;
// The start offset of the remembered set in a page.
static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer;
static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
// The start offset of the object area in a page.
static const int kObjectStartOffset = kRSetEndOffset;
// This needs to be at least (bits per uint32_t) * kBitsPerPointer,
// to align start of rset to a uint32_t address.
static const int kObjectStartOffset = 256;
// The start offset of the remembered set in a page.
static const int kRSetStartOffset = kRSetOffset +
kObjectStartOffset / kBitsPerPointer;
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
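With the default 8K pages and 32-bit pointers these constants work out to 256 bytes of remembered set per page and a 7936-byte object area, matching the layout comment near the top of the file. A standalone sanity check under those assumptions:

#include <cstdio>

int main() {
  // Assumptions: 8K pages, 4-byte pointers (32 bits per pointer).
  const int kPageSize = 8 * 1024;
  const int kBitsPerPointer = 32;
  const int kRSetOffset = 0;                // 4 * kPointerSize on x64
  const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
  const int kObjectStartOffset = 256;       // first byte of the object area
  printf("rset: bytes %d..%d; object area: bytes %d..%d (%d bytes)\n",
         kRSetOffset, kRSetEndOffset, kObjectStartOffset, kPageSize,
         kPageSize - kObjectStartOffset);   // 0..256, 256..8192, 7936
  return 0;
}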
@ -291,7 +302,6 @@ class Space : public Malloced {
virtual int Size() = 0;
#ifdef DEBUG
virtual void Verify() = 0;
virtual void Print() = 0;
#endif
@ -825,6 +835,13 @@ class PagedSpace : public Space {
// Print meta info and objects in this space.
virtual void Print();
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);
// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
// Report code object related statistics
void CollectCodeStatistics();
static void ReportCodeStatistics();
@ -851,6 +868,12 @@ class PagedSpace : public Space {
// Relocation information during mark-compact collections.
AllocationInfo mc_forwarding_info_;
// Bytes of each page that cannot be allocated. Possibly non-zero
// for pages in spaces with only fixed-size objects. Always zero
// for pages in spaces with variable sized objects (those pages are
// padded with free-list nodes).
int page_extra_;
// Sets allocation pointer to a page bottom.
static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
@ -1270,7 +1293,7 @@ class FreeListNode: public HeapObject {
inline void set_next(Address next);
private:
static const int kNextOffset = Array::kHeaderSize;
static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
@ -1304,7 +1327,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes. (Smaller allocations are allowed, but
// will always result in waste.)
static const int kMinBlockSize = Array::kHeaderSize + kPointerSize;
static const int kMinBlockSize = 2 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
// The identity of the owning space, for building allocation Failure
@ -1379,9 +1402,9 @@ class OldSpaceFreeList BASE_EMBEDDED {
// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
class FixedSizeFreeList BASE_EMBEDDED {
public:
explicit MapSpaceFreeList(AllocationSpace owner);
FixedSizeFreeList(AllocationSpace owner, int object_size);
// Clear the free list.
void Reset();
@ -1390,12 +1413,12 @@ class MapSpaceFreeList BASE_EMBEDDED {
int available() { return available_; }
// Place a node on the free list. The block starting at 'start' (assumed to
// have size Map::kSize) is placed on the free list. Bookkeeping
// have size object_size_) is placed on the free list. Bookkeeping
// information will be written to the block, ie, its contents will be
// destroyed. The start address should be word aligned.
void Free(Address start);
// Allocate a map-sized block from the free list. The block is uninitialized.
// Allocate a fixed-size block from the free list. The block is uninitialized.
// A failure is returned if no block is available.
Object* Allocate();
@ -1410,7 +1433,10 @@ class MapSpaceFreeList BASE_EMBEDDED {
// objects.
AllocationSpace owner_;
DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
// The size of the objects in this space.
int object_size_;
DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};
@ -1425,6 +1451,7 @@ class OldSpace : public PagedSpace {
AllocationSpace id,
Executability executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) {
page_extra_ = 0;
}
// The bytes available on the free list (ie, not above the linear allocation
@ -1448,20 +1475,11 @@ class OldSpace : public PagedSpace {
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
// Adjust the top of relocation pointer to point to the end of the object
// given by 'address' and 'size_in_bytes'. Move it to the next page if
// necessary, ensure that it points to the address, then increment it by the
// size.
void MCAdjustRelocationEnd(Address address, int size_in_bytes);
// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
virtual void MCCommitRelocationInfo();
#ifdef DEBUG
// Verify integrity of this space.
virtual void Verify();
// Reports statistics for the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
@ -1480,39 +1498,41 @@ class OldSpace : public PagedSpace {
// The space's free list.
OldSpaceFreeList free_list_;
// During relocation, we keep a pointer to the most recently relocated
// object in order to know when to move to the next page.
Address mc_end_of_relocation_;
public:
TRACK_MEMORY("OldSpace")
};
// -----------------------------------------------------------------------------
// Old space for all map objects
// Old space for objects of a fixed size
class MapSpace : public PagedSpace {
class FixedSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
explicit MapSpace(int max_capacity, AllocationSpace id)
: PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }
FixedSpace(int max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
: PagedSpace(max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
free_list_(id, object_size_in_bytes) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}
// The top of allocation in a page in this space. Undefined if page is unused.
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
: page->ObjectAreaEnd() - page_extra_;
}
// Give a map-sized block of memory to the space's free list.
int object_size_in_bytes() { return object_size_in_bytes_; }
// Give a fixed-size block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
accounting_stats_.DeallocateBytes(Map::kSize);
}
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
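page_extra_ is the tail of each page that cannot hold one more whole object once the page is carved into fixed-size cells; PageAllocationTop subtracts it so allocation never hands out that tail. A worked example with made-up numbers (V8's real constants differ):

// Illustrative constants only; not the real V8 values.
constexpr int kObjectAreaSize = 8000;  // usable bytes per page (assumed)
constexpr int kObjectSize = 96;        // fixed object size (assumed)

// 8000 / 96 = 83 whole objects fit; 8000 % 96 = 32 tail bytes are unusable.
constexpr int kPageExtra = kObjectAreaSize % kObjectSize;
static_assert(kPageExtra == 32, "tail left over after 83 objects");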
@ -1521,21 +1541,13 @@ class MapSpace : public PagedSpace {
virtual void MCCommitRelocationInfo();
#ifdef DEBUG
// Verify integrity of this space.
virtual void Verify();
// Reports statistics for the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
// Constants.
static const int kMapPageIndexBits = 10;
static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
protected:
// Virtual function in the superclass. Slow path of AllocateRaw.
HeapObject* SlowAllocateRaw(int size_in_bytes);
@ -1545,9 +1557,41 @@ class MapSpace : public PagedSpace {
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
private:
// The size of objects in this space.
int object_size_in_bytes_;
// The name of this space.
const char* name_;
// The space's free list.
MapSpaceFreeList free_list_;
FixedSizeFreeList free_list_;
};
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(int max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, Map::kSize, "map") {}
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Constants.
static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
private:
// An array of page start addresses in a map space.
Address page_addresses_[kMaxMapPageIndex + 1];
@ -1556,6 +1600,25 @@ class MapSpace : public PagedSpace {
};
// -----------------------------------------------------------------------------
// Old space for all global object property cell objects
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(int max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
public:
TRACK_MEMORY("MapSpace")
};
// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with

9
deps/v8/src/string-stream.cc

@ -343,10 +343,11 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
Add("<Invalid map>\n");
return;
}
for (DescriptorReader r(map->instance_descriptors()); !r.eos(); r.advance()) {
switch (r.type()) {
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
switch (descs->GetType(i)) {
case FIELD: {
Object* key = r.GetKey();
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
int len = 3;
if (key->IsString()) {
@ -360,7 +361,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
key->ShortPrint();
}
Add(": ");
Object* value = js_object->FastPropertyAt(r.GetFieldIndex());
Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
Add("%o\n", value);
}
}

10
deps/v8/src/string.js

@ -35,7 +35,7 @@
// Set the String function and constructor.
%SetCode($String, function(x) {
var value = %_ArgumentsLength() == 0 ? '' : ToString(x);
if (%IsConstructCall()) {
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
@ -46,7 +46,7 @@
// ECMA-262 section 15.5.4.2
function StringToString() {
if (!IS_STRING(this) && !%HasStringClass(this))
if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
throw new $TypeError('String.prototype.toString is not generic');
return %_ValueOf(this);
}
@ -54,7 +54,7 @@ function StringToString() {
// ECMA-262 section 15.5.4.3
function StringValueOf() {
if (!IS_STRING(this) && !%HasStringClass(this))
if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
throw new $TypeError('String.prototype.valueOf is not generic');
return %_ValueOf(this);
}
@ -433,7 +433,7 @@ function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
if (m == 1) {
var s = CaptureString(subject, lastMatchInfo, 0);
// Don't call directly to avoid exposing the built-in global object.
return ToString(replace.call(null, s, index, subject));
return replace.call(null, s, index, subject);
}
var parameters = $Array(m + 2);
for (var j = 0; j < m; j++) {
@ -441,7 +441,7 @@ function ApplyReplacementFunction(replace, lastMatchInfo, subject) {
}
parameters[j] = index;
parameters[j + 1] = subject;
return ToString(replace.apply(null, parameters));
return replace.apply(null, parameters);
}

98
deps/v8/src/stub-cache.cc

@ -172,6 +172,29 @@ Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
}
Object* StubCache::ComputeLoadGlobal(String* name,
JSObject* receiver,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
bool is_dont_delete) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
LoadStubCompiler compiler;
code = compiler.CompileLoadGlobal(receiver,
holder,
cell,
name,
is_dont_delete);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
return Set(name, receiver->map(), Code::cast(code));
}
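ComputeLoadGlobal (and the store and call variants below) follow the same compile-on-miss shape: probe the receiver map's code cache, compile a specialized stub only on a miss, record it for next time, and propagate failures without caching them. Restated generically, with std::map standing in for the map-embedded code cache (a sketch, not V8's API):

#include <map>
#include <string>

struct Code {};  // placeholder for a compiled stub

class CompileOnMissCache {
 public:
  Code* GetOrCompile(const std::string& name) {
    auto it = cache_.find(name);
    if (it != cache_.end()) return it->second;  // hit: reuse the stub
    Code* code = Compile(name);                 // miss: compile once
    if (code == nullptr) return nullptr;        // failure: cache nothing
    cache_[name] = code;                        // record for next time
    return code;
  }

 private:
  Code* Compile(const std::string&) { return new Code(); }
  std::map<std::string, Code*> cache_;
};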
Object* StubCache::ComputeKeyedLoadField(String* name,
JSObject* receiver,
JSObject* holder,
@ -317,6 +340,23 @@ Object* StubCache::ComputeStoreField(String* name,
}
Object* StubCache::ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell) {
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
StoreStubCompiler compiler;
code = compiler.CompileStoreGlobal(receiver, cell, name);
if (code->IsFailure()) return code;
LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
return Set(name, receiver->map(), Code::cast(code));
}
Object* StubCache::ComputeStoreCallback(String* name,
JSObject* receiver,
AccessorInfo* callback) {
@ -409,9 +449,10 @@ Object* StubCache::ComputeCallConstant(int argc,
// caches.
if (!function->is_compiled()) return Failure::InternalError();
// Compile the stub - only create stubs for fully compiled functions.
CallStubCompiler compiler(argc);
code = compiler.CompileCallConstant(object, holder, function, check, flags);
CallStubCompiler compiler(argc, in_loop);
code = compiler.CompileCallConstant(object, holder, function, name, check);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
@ -442,9 +483,10 @@ Object* StubCache::ComputeCallField(int argc,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc);
code = compiler.CompileCallField(object, holder, index, name, flags);
CallStubCompiler compiler(argc, in_loop);
code = compiler.CompileCallField(object, holder, index, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
@ -475,9 +517,10 @@ Object* StubCache::ComputeCallInterceptor(int argc,
argc);
Object* code = map->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
CallStubCompiler compiler(argc);
CallStubCompiler compiler(argc, NOT_IN_LOOP);
code = compiler.CompileCallInterceptor(object, holder, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result;
@ -496,11 +539,40 @@ Object* StubCache::ComputeCallNormal(int argc,
}
Object* StubCache::ComputeCallGlobal(int argc,
InLoopFlag in_loop,
String* name,
JSObject* receiver,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::CALL_IC, NORMAL, in_loop, argc);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
// If the function hasn't been compiled yet, we cannot do it now
// because it may cause GC. To avoid this issue, we return an
// internal error which will make sure we do not update any
// caches.
if (!function->is_compiled()) return Failure::InternalError();
CallStubCompiler compiler(argc, in_loop);
code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
if (code->IsFailure()) return code;
ASSERT_EQ(flags, Code::cast(code)->flags());
LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code;
}
return Set(name, receiver->map(), Code::cast(code));
}
static Object* GetProbeValue(Code::Flags flags) {
Dictionary* dictionary = Heap::non_monomorphic_cache();
int entry = dictionary->FindNumberEntry(flags);
// Use raw_unchecked... so we don't get assert failures during GC.
NumberDictionary* dictionary = Heap::raw_unchecked_non_monomorphic_cache();
int entry = dictionary->FindEntry(flags);
if (entry != -1) return dictionary->ValueAt(entry);
return Heap::undefined_value();
return Heap::raw_unchecked_undefined_value();
}
@ -514,7 +586,7 @@ static Object* ProbeCache(Code::Flags flags) {
Heap::non_monomorphic_cache()->AtNumberPut(flags,
Heap::undefined_value());
if (result->IsFailure()) return result;
Heap::set_non_monomorphic_cache(Dictionary::cast(result));
Heap::public_set_non_monomorphic_cache(NumberDictionary::cast(result));
return probe;
}
@ -522,7 +594,7 @@ static Object* ProbeCache(Code::Flags flags) {
static Object* FillCache(Object* code) {
if (code->IsCode()) {
int entry =
Heap::non_monomorphic_cache()->FindNumberEntry(
Heap::non_monomorphic_cache()->FindEntry(
Code::cast(code)->flags());
// The entry must be present; see the comment in ProbeCache.
ASSERT(entry != -1);
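ProbeCache and FillCache split the cache update in two steps: the probe inserts a placeholder (undefined) entry before any stub exists, so that FillCache can later store the real code by overwriting an entry that is guaranteed to be present, without allocating at that point. The reservation pattern in miniature (std::unordered_map in place of V8's NumberDictionary; illustrative only):

#include <cassert>
#include <cstdint>
#include <unordered_map>

std::unordered_map<uint32_t, const void*> cache;

void Probe(uint32_t flags) {
  cache.emplace(flags, nullptr);  // reserve eagerly; may allocate here
}

void Fill(uint32_t flags, const void* code) {
  auto it = cache.find(flags);
  assert(it != cache.end());      // Probe reserved the entry earlier
  it->second = code;              // overwrite in place; no allocation
}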
@ -885,6 +957,10 @@ Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
// Check for allocation failures during stub compilation.
if (failure_->IsFailure()) return failure_;
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject());
@ -933,7 +1009,7 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
type,
NOT_IN_LOOP,
in_loop_,
argc);
return GetCodeWithFlags(flags, name);
}

136
deps/v8/src/stub-cache.h

@ -78,6 +78,13 @@ class StubCache : public AllStatic {
static Object* ComputeLoadNormal(String* name, JSObject* receiver);
static Object* ComputeLoadGlobal(String* name,
JSObject* receiver,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
bool is_dont_delete);
// ---
static Object* ComputeKeyedLoadField(String* name,
@ -112,6 +119,10 @@ class StubCache : public AllStatic {
int field_index,
Map* transition = NULL);
static Object* ComputeStoreGlobal(String* name,
GlobalObject* receiver,
JSGlobalPropertyCell* cell);
static Object* ComputeStoreCallback(String* name,
JSObject* receiver,
AccessorInfo* callback);
@ -151,6 +162,14 @@ class StubCache : public AllStatic {
Object* object,
JSObject* holder);
static Object* ComputeCallGlobal(int argc,
InLoopFlag in_loop,
String* name,
JSObject* receiver,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function);
// ---
static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop);
@ -180,11 +199,13 @@ class StubCache : public AllStatic {
static void GenerateMiss(MacroAssembler* masm);
// Generate code for probing the stub cache table.
// If extra != no_reg it might be used as an extra scratch register.
static void GenerateProbe(MacroAssembler* masm,
Code::Flags flags,
Register receiver,
Register name,
Register scratch);
Register scratch,
Register extra);
enum Table {
kPrimary,
@ -303,7 +324,7 @@ class StubCompiler BASE_EMBEDDED {
JSARRAY_HAS_FAST_ELEMENTS_CHECK
};
StubCompiler() : scope_(), masm_(NULL, 256) { }
StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
Object* CompileCallInitialize(Code::Flags flags);
Object* CompileCallPreMonomorphic(Code::Flags flags);
@ -323,40 +344,7 @@ class StubCompiler BASE_EMBEDDED {
static void GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst, Register src,
JSObject* holder, int index);
static void GenerateLoadField(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
Label* miss_label);
static void GenerateLoadCallback(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register name,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
Label* miss_label);
static void GenerateLoadConstant(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
Label* miss_label);
static void GenerateLoadInterceptor(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
Smi* lookup_hint,
Register receiver,
Register name,
Register scratch1,
Register scratch2,
Label* miss_label);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
Register scratch,
@ -391,10 +379,60 @@ class StubCompiler BASE_EMBEDDED {
Object* GetCodeWithFlags(Code::Flags flags, String* name);
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
// Check the integrity of the prototype chain to make sure that the
// current IC is still valid.
Register CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
Register holder_reg,
Register scratch,
String* name,
Label* miss);
void GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
int index,
String* name,
Label* miss);
void GenerateLoadCallback(JSObject* object,
JSObject* holder,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
AccessorInfo* callback,
String* name,
Label* miss);
void GenerateLoadConstant(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
Object* value,
String* name,
Label* miss);
void GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
Smi* lookup_hint,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
String* name,
Label* miss);
private:
HandleScope scope_;
MacroAssembler masm_;
Failure* failure_;
};
@ -416,6 +454,12 @@ class LoadStubCompiler: public StubCompiler {
JSObject* holder,
String* name);
Object* CompileLoadGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
String* name,
bool is_dont_delete);
private:
Object* GetCode(PropertyType type, String* name);
};
@ -457,6 +501,10 @@ class StoreStubCompiler: public StubCompiler {
AccessorInfo* callbacks,
String* name);
Object* CompileStoreInterceptor(JSObject* object, String* name);
Object* CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* holder,
String* name);
private:
Object* GetCode(PropertyType type, String* name);
@ -477,24 +525,30 @@ class KeyedStoreStubCompiler: public StubCompiler {
class CallStubCompiler: public StubCompiler {
public:
explicit CallStubCompiler(int argc) : arguments_(argc) { }
explicit CallStubCompiler(int argc, InLoopFlag in_loop)
: arguments_(argc), in_loop_(in_loop) { }
Object* CompileCallField(Object* object,
JSObject* holder,
int index,
String* name,
Code::Flags flags);
String* name);
Object* CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
CheckType check,
Code::Flags flags);
String* name,
CheckType check);
Object* CompileCallInterceptor(Object* object,
JSObject* holder,
String* name);
Object* CompileCallGlobal(JSObject* object,
GlobalObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name);
private:
const ParameterCount arguments_;
const InLoopFlag in_loop_;
const ParameterCount& arguments() { return arguments_; }

13
deps/v8/src/unicode.cc

@ -194,18 +194,13 @@ static int LookupMapping(const int32_t* table,
uchar Utf8::CalculateValue(const byte* str,
unsigned length,
unsigned* cursor) {
static const uchar kMaxOneByteChar = 0x7F;
static const uchar kMaxTwoByteChar = 0x7FF;
static const uchar kMaxThreeByteChar = 0xFFFF;
static const uchar kMaxFourByteChar = 0x1FFFFF;
// We only get called for non-ASCII characters.
if (length == 1) {
*cursor += 1;
return kBadChar;
}
int first = str[0];
int second = str[1] ^ 0x80;
byte first = str[0];
byte second = str[1] ^ 0x80;
if (second & 0xC0) {
*cursor += 1;
return kBadChar;
@ -227,7 +222,7 @@ uchar Utf8::CalculateValue(const byte* str,
*cursor += 1;
return kBadChar;
}
int third = str[2] ^ 0x80;
byte third = str[2] ^ 0x80;
if (third & 0xC0) {
*cursor += 1;
return kBadChar;
@ -245,7 +240,7 @@ uchar Utf8::CalculateValue(const byte* str,
*cursor += 1;
return kBadChar;
}
int fourth = str[3] ^ 0x80;
byte fourth = str[3] ^ 0x80;
if (fourth & 0xC0) {
*cursor += 1;
return kBadChar;
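The ^ 0x80 / & 0xC0 pairing is a compact validity test: a UTF-8 continuation byte has the form 10xxxxxx, so XOR-ing away the expected 0x80 marker must leave nothing in the top two bits. The same test written out explicitly (a sketch, not the V8 code):

// A UTF-8 continuation byte must match 10xxxxxx.
inline bool IsUtf8Continuation(unsigned char b) {
  return (b & 0xC0) == 0x80;
}

// The decoder's form: strip the expected marker, then any bits left in
// 0xC0 mean the byte was not a continuation.
inline bool IsUtf8ContinuationXor(unsigned char b) {
  return ((b ^ 0x80) & 0xC0) == 0;
}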

6
deps/v8/src/v8-counters.h

@ -130,9 +130,15 @@ namespace internal {
SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \
SC(named_load_inline, V8.NamedLoadInline) \
SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
SC(keyed_store_field, V8.KeyedStoreField) \
SC(keyed_store_inline, V8.KeyedStoreInline) \
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
SC(call_global_inline, V8.CallGlobalInline) \
SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \

22
deps/v8/src/v8natives.js

@ -174,7 +174,7 @@ SetupGlobal();
%SetCode($Boolean, function(x) {
if (%IsConstructCall()) {
if (%_IsConstructCall()) {
%_SetValueOf(this, ToBoolean(x));
} else {
return ToBoolean(x);
@ -192,7 +192,7 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
var c = %ClassOf(this);
var c = %_ClassOf(this);
// Hide Arguments from the outside.
if (c === 'Arguments') c = 'Object';
return "[object " + c + "]";
@ -273,7 +273,7 @@ function ObjectLookupSetter(name) {
%SetCode($Object, function(x) {
if (%IsConstructCall()) {
if (%_IsConstructCall()) {
if (x == null) return this;
return ToObject(x);
} else {
@ -311,7 +311,7 @@ SetupObject();
function BooleanToString() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
if (!IS_BOOLEAN(this) && !%HasBooleanClass(this))
if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
throw new $TypeError('Boolean.prototype.toString is not generic');
return ToString(%_ValueOf(this));
}
@ -320,7 +320,7 @@ function BooleanToString() {
function BooleanValueOf() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
if (!IS_BOOLEAN(this) && !%HasBooleanClass(this))
if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
throw new $TypeError('Boolean.prototype.valueOf is not generic');
return %_ValueOf(this);
}
@ -350,7 +350,7 @@ SetupBoolean();
// Set the Number function and constructor.
%SetCode($Number, function(x) {
var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
if (%IsConstructCall()) {
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
@ -365,7 +365,7 @@ function NumberToString(radix) {
// 'this'. This is not as dictated by ECMA-262.
var number = this;
if (!IS_NUMBER(this)) {
if (!%HasNumberClass(this))
if (!IS_NUMBER_WRAPPER(this))
throw new $TypeError('Number.prototype.toString is not generic');
// Get the value of this number in case it's an object.
number = %_ValueOf(this);
@ -395,7 +395,7 @@ function NumberToLocaleString() {
function NumberValueOf() {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
if (!IS_NUMBER(this) && !%HasNumberClass(this))
if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
throw new $TypeError('Number.prototype.valueOf is not generic');
return %_ValueOf(this);
}
@ -502,10 +502,9 @@ SetupNumber();
$Function.prototype.constructor = $Function;
function FunctionSourceString(func) {
// NOTE: Both Function objects and values can enter here as
// 'func'. This is not as dictated by ECMA-262.
if (!IS_FUNCTION(func) && !%HasFunctionClass(func))
if (!IS_FUNCTION(func)) {
throw new $TypeError('Function.prototype.toString is not generic');
}
var source = %FunctionGetSourceCode(func);
if (!IS_STRING(source)) {
@ -570,4 +569,3 @@ function SetupFunction() {
}
SetupFunction();

14
deps/v8/src/variables.h

@ -143,6 +143,12 @@ class Variable: public ZoneObject {
ARGUMENTS
};
Variable(Scope* scope,
Handle<String> name,
Mode mode,
bool is_valid_lhs,
Kind kind);
// Printing support
static const char* Mode2String(Mode mode);
@ -196,9 +202,6 @@ class Variable: public ZoneObject {
SmiAnalysis* type() { return &type_; }
private:
Variable(Scope* scope, Handle<String> name, Mode mode, bool is_valid_LHS,
Kind kind);
Scope* scope_;
Handle<String> name_;
Mode mode_;
@ -219,10 +222,7 @@ class Variable: public ZoneObject {
// rewrite_ is usually a Slot or a Property, but may be any expression.
Expression* rewrite_;
friend class VariableProxy;
friend class Scope;
friend class LocalsMap;
friend class AstBuildingParser;
friend class Scope; // Has explicit access to rewrite_.
};

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 2
#define BUILD_NUMBER 10
#define BUILD_NUMBER 14
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

2
deps/v8/src/virtual-frame.h

@ -37,6 +37,8 @@
#include "x64/virtual-frame-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/virtual-frame-arm.h"
#else
#error Unsupported target architecture.
#endif
#endif // V8_VIRTUAL_FRAME_H_

201
deps/v8/src/x64/assembler-x64.cc

@ -73,45 +73,8 @@ XMMRegister xmm14 = { 14 };
XMMRegister xmm15 = { 15 };
Operand::Operand(Register base, int32_t disp): rex_(0) {
len_ = 1;
if (base.is(rsp) || base.is(r12)) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
set_sib(times_1, rsp, base);
}
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
set_modrm(0, base);
} else if (is_int8(disp)) {
set_modrm(1, base);
set_disp8(disp);
} else {
set_modrm(2, base);
set_disp32(disp);
}
}
Operand::Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp): rex_(0) {
ASSERT(!index.is(rsp));
len_ = 1;
set_sib(scale, index, base);
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
// This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
// possibly set by set_sib.
set_modrm(0, rsp);
} else if (is_int8(disp)) {
set_modrm(1, rsp);
set_disp8(disp);
} else {
set_modrm(2, rsp);
set_disp32(disp);
}
}
// -----------------------------------------------------------------------------
// Implementation of CpuFeatures
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
@ -193,6 +156,71 @@ void CpuFeatures::Probe() {
ASSERT(IsSupported(CMOV));
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
// Patch the code at the current PC with a call to the target address.
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// Call instruction takes up 13 bytes and int3 takes up one byte.
Address patch_site = pc_;
Memory::uint16_at(patch_site) = 0xBA49u; // movq r10, imm64
// Write "0x00, call r10" starting at the last byte of the address. We overwrite
// the 0x00 later, and this lets us write a uint32.
Memory::uint32_at(patch_site + 9) = 0xD2FF4900u; // 0x00, call r10
Memory::Address_at(patch_site + 2) = target;
// Add the requested number of int3 instructions after the call.
for (int i = 0; i < guard_bytes; i++) {
*(patch_site + 13 + i) = 0xCC; // int3
}
}
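For reference, the 13 bytes decode as follows (offsets relative to patch_site; a sketch of the encoding, matching the writes above):

// [0]      0x49  REX.W|REX.B   \  movq r10, imm64
// [1]      0xBA  opcode        /
// [2..9]   the 8-byte target address
// [10..12] 0x49 0xFF 0xD2         call r10
//
// The uint32 write at offset 9 lays down 00 49 FF D2 (little-endian); the
// leading 0x00 is then overwritten by the last byte of the address.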
// -----------------------------------------------------------------------------
// Implementation of Operand
Operand::Operand(Register base, int32_t disp): rex_(0) {
len_ = 1;
if (base.is(rsp) || base.is(r12)) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
set_sib(times_1, rsp, base);
}
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
set_modrm(0, base);
} else if (is_int8(disp)) {
set_modrm(1, base);
set_disp8(disp);
} else {
set_modrm(2, base);
set_disp32(disp);
}
}
Operand::Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp): rex_(0) {
ASSERT(!index.is(rsp));
len_ = 1;
set_sib(scale, index, base);
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
// This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
// possibly set by set_sib.
set_modrm(0, rsp);
} else if (is_int8(disp)) {
set_modrm(1, rsp);
set_disp8(disp);
} else {
set_modrm(2, rsp);
set_disp32(disp);
}
}
// -----------------------------------------------------------------------------
// Implementation of Assembler
@ -273,6 +301,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
@ -426,6 +455,17 @@ void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
}
void Assembler::arithmetic_op_32(byte opcode,
const Operand& dst,
Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(src, dst);
emit(opcode);
emit_operand(src, dst);
}
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src) {
@ -470,8 +510,8 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0x83);
if (is_int8(src.value_)) {
emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
} else if (dst.is(rax)) {
@ -567,6 +607,23 @@ void Assembler::shift_32(Register dst, int subcode) {
}
void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
if (shift_amount.value_ == 1) {
emit_optional_rex_32(dst);
emit(0xD1);
emit_modrm(subcode, dst);
} else {
emit_optional_rex_32(dst);
emit(0xC1);
emit_modrm(subcode, dst);
emit(shift_amount.value_);
}
}
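The branch selects between the two immediate-shift encodings x86 provides; shifting by exactly 1 has a dedicated form that is one byte shorter. With the 0x7 subcode used by sarl, for example:

// sarl eax, 1  =>  D1 F8     (0xD1 /7: shift-by-one form, no immediate)
// sarl eax, 5  =>  C1 F8 05  (0xC1 /7 ib: general immediate form)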
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -750,6 +807,15 @@ void Assembler::idiv(Register src) {
}
void Assembler::imul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(src);
emit(0xF7);
emit_modrm(0x5, src);
}
void Assembler::imul(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1058,6 +1124,19 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
// Non-relocatable values might not need a 64-bit representation.
if (rmode == RelocInfo::NONE) {
// Sadly, there is no zero or sign extending move for 8-bit immediates.
if (is_int32(value)) {
movq(dst, Immediate(static_cast<int32_t>(value)));
return;
} else if (is_uint32(value)) {
movl(dst, Immediate(static_cast<int32_t>(value)));
return;
}
// Value cannot be represented by 32 bits, so do a full 64 bit immediate
// value.
}
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@ -1087,16 +1166,24 @@ void Assembler::movq(const Operand& dst, Immediate value) {
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
// If there is no relocation info, emit the value of the handle efficiently
// (possibly using less than 8 bytes for the value).
if (mode == RelocInfo::NONE) {
// There is no possible reason to store a heap pointer without relocation
// info, so it must be a smi.
ASSERT(value->IsSmi());
// Smis never have more than 32 significant bits, but they might
// have garbage in the high bits.
movq(dst,
Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value))));
} else {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(value->IsHeapObject());
ASSERT(!Heap::InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
if (value->IsHeapObject()) {
emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
} else {
ASSERT_EQ(RelocInfo::NONE, mode);
emitq(reinterpret_cast<uintptr_t>(*value), RelocInfo::NONE);
}
}
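Both hunks above choose the shortest correct encoding for a 64-bit value: a sign-extended 32-bit immediate when it fits in int32, movl (which zero-extends into the full 64-bit register on x64) when it fits in uint32, and the full 10-byte movq imm64 only as a last resort. Handles take the immediate path only for smis, since heap pointers need relocation info. Illustrative values (assumed, not from the source):

int64_t a = -1;             // fits int32  -> movq dst, imm32 (sign-extends)
int64_t b = 0xFFFFFFFFLL;   // fits uint32 -> movl dst, imm32 (zero-extends)
int64_t c = 0x123456789LL;  // neither     -> movq dst, imm64 (10 bytes)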
@ -1439,7 +1526,7 @@ void Assembler::testb(Register reg, Immediate mask) {
last_pc_ = pc_;
if (reg.is(rax)) {
emit(0xA8);
emit(mask);
emit(mask.value_); // Low byte emitted.
} else {
if (reg.code() > 3) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@ -1463,6 +1550,15 @@ void Assembler::testb(const Operand& op, Immediate mask) {
}
void Assembler::testl(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x85);
emit_modrm(dst, src);
}
void Assembler::testl(Register reg, Immediate mask) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1551,6 +1647,7 @@ void Assembler::fldz() {
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
}
@ -1559,6 +1656,7 @@ void Assembler::fld_s(const Operand& adr) {
void Assembler::fld_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
}
@ -1567,6 +1665,7 @@ void Assembler::fld_d(const Operand& adr) {
void Assembler::fstp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
}
@ -1575,6 +1674,7 @@ void Assembler::fstp_s(const Operand& adr) {
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(3, adr);
}
@ -1583,6 +1683,7 @@ void Assembler::fstp_d(const Operand& adr) {
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
}
@ -1591,6 +1692,7 @@ void Assembler::fild_s(const Operand& adr) {
void Assembler::fild_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
}
@ -1599,6 +1701,7 @@ void Assembler::fild_d(const Operand& adr) {
void Assembler::fistp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
}
@ -1608,6 +1711,7 @@ void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(1, adr);
}
@ -1616,6 +1720,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
}
@ -1624,6 +1729,7 @@ void Assembler::fist_s(const Operand& adr) {
void Assembler::fistp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(8, adr);
}
@ -1678,6 +1784,7 @@ void Assembler::fsub(int i) {
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(adr);
emit(0xDA);
emit_operand(4, adr);
}
@ -2001,11 +2108,11 @@ void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
emit(0xC0 | (dst.code() << 3) | src.code());
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
emit(0xC0 | (dst.code() << 3) | src.code());
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}

72
deps/v8/src/x64/assembler-x64.h

@ -160,6 +160,17 @@ struct XMMRegister {
return code_;
}
// Return the high bit of the register code as a 0 or 1. Used often
// when constructing the REX prefix byte.
int high_bit() const {
return code_ >> 3;
}
// Return the 3 low bits of the register code. Used when encoding registers
// in modR/M, SIB, and opcode bytes.
int low_bits() const {
return code_ & 0x7;
}
int code_;
};
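Splitting the 4-bit x64 register code this way matches where it is encoded: the high bit travels in the REX prefix, the low three bits in the ModR/M or SIB byte. A worked example:

// xmm11 has code_ = 11 = 0b1011:
//   high_bit() = 11 >> 3  = 1   -> the REX.R / REX.X / REX.B bit
//   low_bits() = 11 & 0x7 = 3   -> the 3-bit field in ModR/M or SIB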
@ -522,6 +533,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x0, dst, src);
}
void addl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
@ -539,10 +554,6 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x0, dst, src);
}
void addl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@ -551,6 +562,26 @@ class Assembler : public Malloced {
immediate_arithmetic_op_8(0x7, dst, src);
}
void cmpl(Register dst, Register src) {
arithmetic_op_32(0x3B, dst, src);
}
void cmpl(Register dst, const Operand& src) {
arithmetic_op_32(0x3B, src, dst);
}
void cmpl(const Operand& dst, Register src) {
arithmetic_op_32(0x39, dst, src);
}
void cmpl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x7, dst, src);
}
void cmpl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x7, dst, src);
}
void cmpq(Register dst, Register src) {
arithmetic_op(0x3B, dst, src);
}
@ -567,10 +598,6 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x7, dst, src);
}
void cmpl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x7, dst, src);
}
void cmpq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
@ -605,12 +632,13 @@ class Assembler : public Malloced {
// Divide rdx:rax by src. Quotient in rax, remainder in rdx.
void idiv(Register src);
void imul(Register dst, Register src);
void imul(Register dst, const Operand& src);
// Performs the operation dst = src * imm.
void imul(Register dst, Register src, Immediate imm);
// Signed multiply instructions.
void imul(Register src); // rdx:rax = rax * src.
void imul(Register dst, Register src); // dst = dst * src.
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
// Multiply 32 bit registers
void imull(Register dst, Register src);
void imull(Register dst, Register src); // dst = dst * src.
void incq(Register dst);
void incq(const Operand& dst);
@ -662,11 +690,22 @@ class Assembler : public Malloced {
shift(dst, shift_amount, 0x7);
}
// Shifts dst right, duplicating sign bit, by shift_amount bits.
// Shifting by 1 is handled efficiently.
void sarl(Register dst, Immediate shift_amount) {
shift_32(dst, shift_amount, 0x7);
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
void sar(Register dst) {
shift(dst, 0x7);
}
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
void sarl(Register dst) {
shift_32(dst, 0x7);
}
void shl(Register dst, Immediate shift_amount) {
shift(dst, shift_amount, 0x4);
}
@ -722,8 +761,13 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x5, dst, src);
}
void subl(Register dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
@ -1070,6 +1114,7 @@ class Assembler : public Malloced {
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
void arithmetic_op(byte opcode, Register reg, const Operand& op);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
@ -1089,6 +1134,7 @@ class Assembler : public Malloced {
Immediate src);
// Emit machine code for a shift operation.
void shift(Register dst, Immediate shift_amount, int subcode);
void shift_32(Register dst, Immediate shift_amount, int subcode);
// Shift dst by cl % 64 bits.
void shift(Register dst, int subcode);
void shift_32(Register dst, int subcode);

791
deps/v8/src/x64/codegen-x64.cc

File diff suppressed because it is too large

68
deps/v8/src/x64/codegen-x64.h

@ -294,15 +294,6 @@ class CodeGenerator: public AstVisitor {
Handle<Script> script,
bool is_eval);
// During implementation of CodeGenerator, this call creates a
// CodeGenerator instance, and calls GenCode on it with a null
// function literal. CodeGenerator will then construct and return
// a simple dummy function. Call this during bootstrapping before
// trying to compile any real functions, to get CodeGenerator up
// and running.
// TODO(X64): Remove once we can get through the bootstrapping process.
static void TestCodeGenerator();
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@ -432,6 +423,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
@ -522,11 +514,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the value field of an object (used by Date).
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
@ -547,58 +543,6 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support.
//
// Only allow fast-case switch if the range of labels is at most
// this factor times the number of case labels.
// Value is derived from comparing the size of code generated by the normal
// switch code for Smi-labels to the size of a single pointer. If code
// quality increases this number should be decreased to match.
static const int kFastSwitchMaxOverheadFactor = 5;
// Minimal number of switch cases required before we allow jump-table
// optimization.
static const int kFastSwitchMinCaseCount = 5;
// The limit of the range of a fast-case switch, as a factor of the number
// of cases of the switch. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMaxOverheadFactor();
// The minimal number of cases in a switch before the fast-case switch
// optimization is enabled. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMinCaseCount();
// Allocate a jump table and create code to jump through it.
// Should call GenerateFastCaseSwitchCases to generate the code for
// all the cases at the appropriate point.
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
Label* fail_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,
int default_index);
// Fast support for constant-Smi switches. Tests whether switch statement
// permits optimization and calls GenerateFastCaseSwitch if it does.
// Returns true if the fast-case switch was generated, and false if not.
bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.

6
deps/v8/src/x64/debug-x64.cc

@ -38,8 +38,10 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
UNIMPLEMENTED();
return false;
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
// 11th byte of patch is 0x49, 11th byte of JS return is 0xCC (int3).
ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC);
return (*(rinfo->pc() + 10) == 0x49);
}
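The offset lines up with the 13-byte patch written by RelocInfo::PatchCodeWithCall earlier in this diff: byte 10 of a patched return site is the 0x49 REX prefix of call r10, whereas an unpatched JS return still has int3 (0xCC) padding there. In sketch form:

// Patched:   ... [10] 0x49 0xFF 0xD2   (call r10)
// Unpatched: ... [10] 0xCC             (int3 at the return site)
bool IsPatched(const unsigned char* pc) { return pc[10] == 0x49; }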
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {

1398
deps/v8/src/x64/disasm-x64.cc

File diff suppressed because it is too large

7
deps/v8/src/x64/frames-x64.h

@ -59,12 +59,7 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;
static const int kFunctionArgOffset = +3 * kPointerSize;
static const int kReceiverArgOffset = +4 * kPointerSize;
static const int kArgcOffset = +5 * kPointerSize;
static const int kArgvOffset = +6 * kPointerSize;
static const int kCallerFPOffset = -10 * kPointerSize;
};

6
deps/v8/src/x64/ic-x64.cc

@ -212,11 +212,9 @@ void CallIC::Generate(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &invoke);
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ movzxbq(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
__ cmpq(rcx, Immediate(static_cast<int8_t>(JS_GLOBAL_OBJECT_TYPE)));
__ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
__ cmpq(rcx, Immediate(static_cast<int8_t>(JS_BUILTINS_OBJECT_TYPE)));
__ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &invoke);
// Patch the receiver on the stack.
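CmpObjectType and CmpInstanceType fold away the three-instruction sequence the old code spelled out by hand; judging from the removed lines, the expansion is presumably close to the following (a sketch, not the macro-assembler source):

// CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx): load rdx's map into rcx,
// then compare the map's instance-type field:
//   movq rcx, [rdx + HeapObject::kMapOffset]     ; (a FieldOperand access)
//   cmp  byte [rcx + Map::kInstanceTypeOffset], JS_GLOBAL_OBJECT_TYPE
// CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE) reuses the map already in
// rcx, which is why the second check needs no reload.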

Some files were not shown because too many files changed in this diff
